1
0
Fork 0

Merging upstream version 2.9.1.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-02-16 12:26:52 +01:00
parent bb95f41000
commit 698d985f9d
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
451 changed files with 5896 additions and 2734 deletions

File diff suppressed because it is too large Load diff

View file

@ -9,18 +9,36 @@
PLUGIN(NAME("memblaze", "Memblaze vendor specific extensions", NVME_VERSION),
COMMAND_LIST(
ENTRY("smart-log-add", "Retrieve Memblaze SMART Log, show it", mb_get_additional_smart_log)
ENTRY("get-pm-status", "Get Memblaze Power Manager Status", mb_get_powermanager_status)
ENTRY("set-pm-status", "Set Memblaze Power Manager Status", mb_set_powermanager_status)
ENTRY("select-download", "Selective Firmware Download", mb_selective_download)
ENTRY("lat-stats", "Enable and disable Latency Statistics logging", mb_set_lat_stats)
ENTRY("lat-stats-print", "Retrieve IO Latency Statistics log, show it", mb_lat_stats_log_print)
ENTRY("lat-log", "Set Memblaze High Latency Log", mb_set_high_latency_log)
ENTRY("lat-log-print", "Output Memblaze High Latency Log", mb_high_latency_log_print)
ENTRY("clear-error-log", "Clear error log", memblaze_clear_error_log)
ENTRY("smart-log-add-x", "Retrieve Memblaze SMART Log, show it", mb_get_smart_log_add)
ENTRY("lat-set-feature-x", "Set Enable/Disable for Latency Monitor feature", mb_set_latency_feature)
ENTRY("lat-get-feature-x", "Get Enabled/Disabled of Latency Monitor feature", mb_get_latency_feature)
ENTRY("smart-log-add", "Retrieve Memblaze SMART Log, show it",
mb_get_additional_smart_log)
ENTRY("get-pm-status", "Get Memblaze Power Manager Status",
mb_get_powermanager_status)
ENTRY("set-pm-status", "Set Memblaze Power Manager Status",
mb_set_powermanager_status)
ENTRY("select-download", "Selective Firmware Download",
mb_selective_download)
ENTRY("lat-stats", "Enable and disable Latency Statistics logging",
mb_set_lat_stats)
ENTRY("lat-stats-print", "Retrieve IO Latency Statistics log, show it",
mb_lat_stats_log_print)
ENTRY("lat-log", "Set Memblaze High Latency Log",
mb_set_high_latency_log)
ENTRY("lat-log-print", "Output Memblaze High Latency Log",
mb_high_latency_log_print)
ENTRY("clear-error-log", "Clear error log",
memblaze_clear_error_log)
ENTRY("smart-log-add-x", "Retrieve Memblaze SMART Log, show it",
mb_get_smart_log_add)
ENTRY("lat-set-feature-x", "Set Enable/Disable for Latency Monitor feature",
mb_set_latency_feature)
ENTRY("lat-get-feature-x", "Get Enabled/Disabled of Latency Monitor feature",
mb_get_latency_feature)
ENTRY("lat-stats-print-x", "Get Latency Statistics log and show it.",
mb_get_latency_stats)
ENTRY("lat-log-print-x", "Get High Latency log and show it.",
mb_get_high_latency_log)
ENTRY("perf-stats-print-x", "Get Performance Stat log and show it.",
mb_get_performance_stats)
)
);

View file

@ -26,10 +26,11 @@ if json_c_dep.found()
'plugins/wdc/wdc-utils.c',
'plugins/ymtc/ymtc-nvme.c',
'plugins/zns/zns.c',
'plugins/ssstc/ssstc-nvme.c',
]
subdir('solidigm')
subdir('ocp')
if conf.has('HAVE_SED_OPAL')
if conf.get('HAVE_SED_OPAL') != 0
subdir('sed')
endif
endif

View file

@ -132,34 +132,24 @@ static enum eDriveModel GetDriveModel(int idx)
if (vendor_id == MICRON_VENDOR_ID) {
switch (device_id) {
case 0x5196:
fallthrough;
case 0x51A0:
fallthrough;
case 0x51A1:
fallthrough;
case 0x51A2:
eModel = M51AX;
break;
case 0x51B0:
fallthrough;
case 0x51B1:
fallthrough;
case 0x51B2:
eModel = M51BX;
break;
case 0x51C0:
fallthrough;
case 0x51C1:
fallthrough;
case 0x51C2:
fallthrough;
case 0x51C3:
eModel = M51CX;
break;
case 0x5405:
fallthrough;
case 0x5406:
fallthrough;
case 0x5407:
eModel = M5407;
break;
@ -226,7 +216,6 @@ static int SetupDebugDataDirectories(char *strSN, char *strFilePath,
int length = 0;
int nIndex = 0;
char *strTemp = NULL;
struct stat dirStat;
int j;
int k = 0;
int i = 0;
@ -304,18 +293,17 @@ static int SetupDebugDataDirectories(char *strSN, char *strFilePath,
strMainDirName[nIndex] = '\0';
j = 1;
while (!stat(strMainDirName, &dirStat)) {
while (mkdir(strMainDirName, 0777) < 0) {
if (errno != EEXIST) {
err = -1;
goto exit_status;
}
strMainDirName[nIndex] = '\0';
sprintf(strAppend, "-%d", j);
strcat(strMainDirName, strAppend);
j++;
}
if (mkdir(strMainDirName, 0777) < 0) {
err = -1;
goto exit_status;
}
if (strOSDirName) {
sprintf(strOSDirName, "%s/%s", strMainDirName, "OS");
if (mkdir(strOSDirName, 0777) < 0) {
@ -331,7 +319,7 @@ static int SetupDebugDataDirectories(char *strSN, char *strFilePath,
rmdir(strOSDirName);
rmdir(strMainDirName);
err = -1;
}
}
}
exit_status:
@ -3217,28 +3205,20 @@ static int micron_internal_logs(int argc, char **argv, struct command *cmd,
err = -1;
switch (aVendorLogs[i].ucLogPage) {
case 0xE1:
fallthrough;
case 0xE5:
fallthrough;
case 0xE9:
err = 1;
break;
case 0xE2:
fallthrough;
case 0xE3:
fallthrough;
case 0xE4:
fallthrough;
case 0xE8:
fallthrough;
case 0xEA:
err = get_common_log(dev_fd(dev), aVendorLogs[i].ucLogPage,
&dataBuffer, &bSize);
break;
case 0xC1:
fallthrough;
case 0xC2:
fallthrough;
case 0xC4:
err = GetLogPageSize(dev_fd(dev), aVendorLogs[i].ucLogPage,
&bSize);
@ -3247,7 +3227,6 @@ static int micron_internal_logs(int argc, char **argv, struct command *cmd,
&dataBuffer, bSize);
break;
case 0xE6:
fallthrough;
case 0xE7:
puiIDDBuf = (unsigned int *)&ctrl;
uiMask = puiIDDBuf[1015];
@ -3273,11 +3252,8 @@ static int micron_internal_logs(int argc, char **argv, struct command *cmd,
}
break;
case 0xF7:
fallthrough;
case 0xF9:
fallthrough;
case 0xFC:
fallthrough;
case 0xFD:
if (eModel == M51BX)
(void)NVMEResetLog(dev_fd(dev), aVendorLogs[i].ucLogPage,

View file

@ -4,11 +4,12 @@
#include <stdio.h>
#include <fnmatch.h>
#include <libnvme.h>
#include "nvme-print.h"
#include "nvme.h"
#include "nbft.h"
#include "libnvme.h"
#include "fabrics.h"
#include "util/logging.h"
#define CREATE_CMD
#include "nbft-plugin.h"
@ -168,7 +169,11 @@ static json_object *ssns_to_json(struct nbft_info_subsystem_ns *ss)
|| json_object_add_value_int(ss_json, "pdu_header_digest_required",
ss->pdu_header_digest_required)
|| json_object_add_value_int(ss_json, "data_digest_required",
ss->data_digest_required))
ss->data_digest_required)
|| json_object_add_value_int(ss_json, "discovered",
ss->discovered)
|| json_object_add_value_int(ss_json, "unavailable",
ss->unavailable))
goto fail;
return ss_json;
@ -319,7 +324,7 @@ static int json_show_nbfts(struct list_head *nbft_list, bool show_subsys,
bool show_hfi, bool show_discovery)
{
struct json_object *nbft_json_array, *nbft_json;
struct nbft_file_entry *entry;
struct nbft_file_entry *entry = NULL;
nbft_json_array = json_create_array();
if (!nbft_json_array)
@ -510,7 +515,7 @@ static void normal_show_nbfts(struct list_head *nbft_list, bool show_subsys,
bool show_hfi, bool show_discovery)
{
bool not_first = false;
struct nbft_file_entry *entry;
struct nbft_file_entry *entry = NULL;
list_for_each(nbft_list, entry, node) {
if (not_first)
@ -529,6 +534,8 @@ int show_nbft(int argc, char **argv, struct command *cmd, struct plugin *plugin)
enum nvme_print_flags flags;
int ret;
bool show_subsys = false, show_hfi = false, show_discovery = false;
unsigned int verbose = 0;
nvme_root_t r;
OPT_ARGS(opts) = {
OPT_FMT("output-format", 'o', &format, "Output format: normal|json"),
@ -536,6 +543,7 @@ int show_nbft(int argc, char **argv, struct command *cmd, struct plugin *plugin)
OPT_FLAG("hfi", 'H', &show_hfi, "show NBFT HFIs"),
OPT_FLAG("discovery", 'd', &show_discovery, "show NBFT discovery controllers"),
OPT_STRING("nbft-path", 0, "STR", &nbft_path, "user-defined path for NBFT tables"),
OPT_INCR("verbose", 'v', &verbose, "Increase logging verbosity"),
OPT_END()
};
@ -543,10 +551,15 @@ int show_nbft(int argc, char **argv, struct command *cmd, struct plugin *plugin)
if (ret)
return ret;
log_level = map_log_level(verbose, false /* quiet */);
ret = validate_output_format(format, &flags);
if (ret < 0)
return ret;
/* initialize libnvme logging */
r = nvme_create_root(stderr, log_level);
if (!(show_subsys || show_hfi || show_discovery))
show_subsys = show_hfi = show_discovery = true;
@ -559,5 +572,6 @@ int show_nbft(int argc, char **argv, struct command *cmd, struct plugin *plugin)
ret = json_show_nbfts(&nbft_list, show_subsys, show_hfi, show_discovery);
free_nbfts(&nbft_list);
}
nvme_free_tree(r);
return ret;
}

View file

@ -46,12 +46,14 @@ enum {
enum {
ONTAP_C2_LOG_SUPPORTED_LSP = 0x0,
ONTAP_C2_LOG_NSINFO_LSP = 0x1,
ONTAP_C2_LOG_PLATFORM_LSP = 0x2,
};
enum {
ONTAP_VSERVER_TLV = 0x11,
ONTAP_VOLUME_TLV = 0x12,
ONTAP_NS_TLV = 0x13,
ONTAP_VSERVER_NAME_TLV = 0x11,
ONTAP_VOLUME_NAME_TLV = 0x12,
ONTAP_NS_NAME_TLV = 0x13,
ONTAP_NS_PATH_TLV = 0x14,
};
static const char *dev_path = "/dev/";
@ -134,8 +136,10 @@ static void netapp_get_ontap_labels(char *vsname, char *nspath,
unsigned char *log_data)
{
int lsp, tlv, label_len;
char *vserver_name, *volume_name, *namespace_name;
char *vserver_name, *volume_name, *namespace_name, *namespace_path;
char vol_name[ONTAP_LABEL_LEN], ns_name[ONTAP_LABEL_LEN];
char ns_path[ONTAP_LABEL_LEN];
bool nspath_tlv_available = false;
const char *ontap_vol = "/vol/";
int i, j;
@ -145,9 +149,9 @@ static void netapp_get_ontap_labels(char *vsname, char *nspath,
/* lsp not related to nsinfo */
return;
/* get the vserver tlv and name */
/* get the vserver name tlv */
tlv = *(__u8 *)&log_data[32];
if (tlv == ONTAP_VSERVER_TLV) {
if (tlv == ONTAP_VSERVER_NAME_TLV) {
label_len = (*(__u16 *)&log_data[34]) * 4;
vserver_name = (char *)&log_data[36];
ontap_labels_to_str(vsname, vserver_name, label_len);
@ -159,9 +163,9 @@ static void netapp_get_ontap_labels(char *vsname, char *nspath,
i = 36 + label_len;
j = i + 2;
/* get the volume tlv and name */
/* get the volume name tlv */
tlv = *(__u8 *)&log_data[i];
if (tlv == ONTAP_VOLUME_TLV) {
if (tlv == ONTAP_VOLUME_NAME_TLV) {
label_len = (*(__u16 *)&log_data[j]) * 4;
volume_name = (char *)&log_data[j + 2];
ontap_labels_to_str(vol_name, volume_name, label_len);
@ -173,9 +177,9 @@ static void netapp_get_ontap_labels(char *vsname, char *nspath,
i += 4 + label_len;
j += 4 + label_len;
/* get the namespace tlv and name */
/* get the namespace name tlv */
tlv = *(__u8 *)&log_data[i];
if (tlv == ONTAP_NS_TLV) {
if (tlv == ONTAP_NS_NAME_TLV) {
label_len = (*(__u16 *)&log_data[j]) * 4;
namespace_name = (char *)&log_data[j + 2];
ontap_labels_to_str(ns_name, namespace_name, label_len);
@ -185,8 +189,25 @@ static void netapp_get_ontap_labels(char *vsname, char *nspath,
return;
}
snprintf(nspath, ONTAP_NS_PATHLEN, "%s%s%s%s", ontap_vol,
i += 4 + label_len;
j += 4 + label_len;
/* get the namespace path tlv if available */
tlv = *(__u8 *)&log_data[i];
if (tlv == ONTAP_NS_PATH_TLV) {
nspath_tlv_available = true;
label_len = (*(__u16 *)&log_data[j]) * 4;
namespace_path = (char *)&log_data[j + 2];
ontap_labels_to_str(ns_path, namespace_path, label_len);
}
if (nspath_tlv_available) {
/* set nspath from the corresponding ns_path string */
snprintf(nspath, ONTAP_NS_PATHLEN, "%s", ns_path);
} else {
/* set nspath by concatenating ontap_vol with ns_name */
snprintf(nspath, ONTAP_NS_PATHLEN, "%s%s%s%s", ontap_vol,
vol_name, "/", ns_name);
}
}
static void netapp_smdevice_json(struct json_object *devices, char *devname,

View file

@ -18,7 +18,7 @@ static int ocp_clear_feature(int argc, char **argv, const char *desc, const __u8
__u32 result = 0;
__u32 clear = 1 << 31;
struct nvme_dev *dev;
int uuid_index = 0;
__u8 uuid_index = 0;
bool uuid = true;
int err;

View file

@ -66,7 +66,7 @@ static void ocp_fw_activation_history_normal(const struct fw_activation_history
printf(" %-22s%d\n", "activation count:",
le16_to_cpu(entry->activation_count));
printf(" %-22s%"PRIu64"\n", "timestamp:",
le64_to_cpu(entry->timestamp));
(0x0000FFFFFFFFFFFF & le64_to_cpu(entry->timestamp)));
printf(" %-22s%"PRIu64"\n", "power cycle count:",
le64_to_cpu(entry->power_cycle_count));
printf(" %-22s%.*s\n", "previous firmware:", (int)sizeof(entry->previous_fw),
@ -106,7 +106,7 @@ static void ocp_fw_activation_history_json(const struct fw_activation_history *f
json_object_add_value_uint(entry_obj, "activation count",
le16_to_cpu(entry->activation_count));
json_object_add_value_uint64(entry_obj, "timestamp",
le64_to_cpu(entry->timestamp));
(0x0000FFFFFFFFFFFF & le64_to_cpu(entry->timestamp)));
json_object_add_value_uint(entry_obj, "power cycle count",
le64_to_cpu(entry->power_cycle_count));
@ -162,7 +162,7 @@ int ocp_fw_activation_history_log(int argc, char **argv, struct command *cmd,
if (err)
return err;
int uuid_index = 0;
__u8 uuid_index = 0;
/*
* Best effort attempt at uuid. Otherwise, assume no index (i.e. 0)

File diff suppressed because it is too large Load diff

View file

@ -30,6 +30,9 @@ PLUGIN(NAME("ocp", "OCP cloud SSD extensions", NVME_VERSION),
ENTRY("set-plp-health-check-interval", "Set PLP Health Check Interval", set_plp_health_check_interval)
ENTRY("get-plp-health-check-interval", "Get PLP Health Check Interval", get_plp_health_check_interval)
ENTRY("telemetry-string-log", "Retrieve Telemetry string Log Page", ocp_telemetry_str_log_format)
ENTRY("set-telemetry-profile", "Set Telemetry Profile Feature", ocp_set_telemetry_profile_feature)
ENTRY("set-dssd-async-event-config", "Set DSSD Async Event Config", set_dssd_async_event_config)
ENTRY("get-dssd-async-event-config", "Get DSSD Async Event Config", get_dssd_async_event_config)
)
);

View file

@ -1,19 +1,32 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2022 Solidigm.
* Copyright (c) 2022-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
#include <unistd.h>
#include <errno.h>
#include "ocp-utils.h"
#include "nvme-print.h"
const unsigned char ocp_uuid[NVME_UUID_LEN] = {
0xc1, 0x94, 0xd5, 0x5b, 0xe0, 0x94, 0x47, 0x94, 0xa2, 0x1d,
0x29, 0x99, 0x8f, 0x56, 0xbe, 0x6f };
int ocp_get_uuid_index(struct nvme_dev *dev, int *index)
int ocp_find_uuid_index(struct nvme_id_uuid_list *uuid_list, __u8 *index)
{
int i = nvme_uuid_find(uuid_list, ocp_uuid);
*index = 0;
if (i > 0)
*index = i;
else
return -errno;
return 0;
}
int ocp_get_uuid_index(struct nvme_dev *dev, __u8 *index)
{
struct nvme_id_uuid_list uuid_list;
int err = nvme_identify_uuid(dev_fd(dev), &uuid_list);
@ -22,11 +35,5 @@ int ocp_get_uuid_index(struct nvme_dev *dev, int *index)
if (err)
return err;
for (int i = 0; i < NVME_ID_UUID_LIST_MAX; i++) {
if (memcmp(ocp_uuid, &uuid_list.entry[i].uuid, NVME_UUID_LEN) == 0) {
*index = i + 1;
break;
}
}
return err;
return ocp_find_uuid_index(&uuid_list, index);
}

View file

@ -1,18 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2022 Solidigm.
* Copyright (c) 2022-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
#include "nvme.h"
/*
* UUID assigned for OCP.
*/
extern const unsigned char ocp_uuid[NVME_UUID_LEN];
/**
* ocp_get_uuid_index() - Get OCP UUID index
* @dev: nvme device
* @index: integer pointer to here to save the index
* @result: The command completion result from CQE dword0
*
* Return: Zero if nvme device has UUID list log page, or result of get uuid list otherwise.
* Return: Zero if nvme device has UUID list identify page, or positive result of get uuid list
* or negative POSIX error code otherwise.
*/
int ocp_get_uuid_index(struct nvme_dev *dev, int *index);
int ocp_get_uuid_index(struct nvme_dev *dev, __u8 *index);
/**
* ocp_find_uuid_index() - Find OCP UUID index in UUID list
* @uuid_list: uuid_list retrieved from Identify UUID List (CNS 0x17)
* @index: integer pointer to here to save the index
*
* Return: Zero if nvme device has UUID list log page, Negative POSIX error code otherwise.
*/
int ocp_find_uuid_index(struct nvme_id_uuid_list *uuid_list, __u8 *index);

View file

@ -169,8 +169,10 @@ int sedopal_cmd_initialize(int fd)
struct opal_key key;
struct opal_lr_act lr_act = {};
struct opal_user_lr_setup lr_setup = {};
struct opal_new_pw new_pw = {};
sedopal_ask_key = true;
sedopal_ask_new_key = true;
rc = sedopal_set_key(&key);
if (rc != 0)
return rc;
@ -217,6 +219,21 @@ int sedopal_cmd_initialize(int fd)
return rc;
}
/*
* set password
*/
new_pw.new_user_pw.who = OPAL_ADMIN1;
new_pw.new_user_pw.opal_key.lr = 0;
new_pw.session.who = OPAL_ADMIN1;
new_pw.session.sum = 0;
new_pw.session.opal_key.lr = 0;
new_pw.session.opal_key = key;
new_pw.new_user_pw.opal_key = key;
rc = ioctl(fd, IOC_OPAL_SET_PW, &new_pw);
if (rc != 0)
fprintf(stderr, "Error: failed setting password - %d\n", rc);
return rc;
}
@ -455,7 +472,7 @@ int sedopal_cmd_discover(int fd)
struct level_0_discovery_features *feat;
struct level_0_discovery_features *feat_end;
uint16_t code;
uint8_t locking_flags;
uint8_t locking_flags = 0;
char buf[4096];
discover.data = (__u64)buf;

View file

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2022 Solidigm.
* Copyright (c) 2022-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
@ -97,7 +97,7 @@ int solidigm_get_garbage_collection_log(int argc, char **argv, struct command *c
return -EINVAL;
}
uuid_index = solidigm_get_vu_uuid_index(dev);
sldgm_get_uuid_index(dev, &uuid_index);
struct garbage_control_collection_log gc_log;
const int solidigm_vu_gc_log_id = 0xfd;

View file

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2022 Solidigm.
* Copyright (c) 2022-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
@ -430,7 +430,7 @@ int solidigm_get_latency_tracking_log(int argc, char **argv, struct command *cmd
return -EINVAL;
}
lt.uuid_index = solidigm_get_vu_uuid_index(dev);
sldgm_get_uuid_index(dev, &lt.uuid_index);
err = latency_tracking_enable(&lt);
if (err) {

View file

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2023 Solidigm.
* Copyright (c) 2023-2024 Solidigm.
*
* Author: karl.dedow@solidigm.com
*/
@ -15,6 +15,7 @@
#include "nvme-print.h"
#include "plugins/ocp/ocp-utils.h"
#include "solidigm-util.h"
#define MIN_VENDOR_LID 0xC0
#define SOLIDIGM_MAX_UUID 2
@ -38,41 +39,9 @@ static void init_lid_dir(struct lid_dir *lid_dir)
}
}
static bool is_invalid_uuid(const struct nvme_id_uuid_list_entry entry)
{
static const unsigned char ALL_ZERO_UUID[NVME_UUID_LEN] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
return memcmp(ALL_ZERO_UUID, entry.uuid, NVME_UUID_LEN) == 0;
}
static bool is_solidigm_uuid(const struct nvme_id_uuid_list_entry entry)
{
static const unsigned char SOLIDIGM_UUID[NVME_UUID_LEN] = {
0x96, 0x19, 0x58, 0x6e, 0xc1, 0x1b, 0x43, 0xad,
0xaa, 0xaa, 0x65, 0x41, 0x87, 0xf6, 0xbb, 0xb2
};
return memcmp(SOLIDIGM_UUID, entry.uuid, NVME_UUID_LEN) == 0;
}
static bool is_ocp_uuid(const struct nvme_id_uuid_list_entry entry)
{
static const unsigned char OCP_UUID[NVME_UUID_LEN] = {
0xc1, 0x94, 0xd5, 0x5b, 0xe0, 0x94, 0x47, 0x94,
0xa2, 0x1d, 0x29, 0x99, 0x8f, 0x56, 0xbe, 0x6f
};
return memcmp(OCP_UUID, entry.uuid, NVME_UUID_LEN) == 0;
}
static int get_supported_log_pages_log(struct nvme_dev *dev, int uuid_index,
struct nvme_supported_log_pages *supported)
{
static const __u8 LID;
memset(supported, 0, sizeof(*supported));
struct nvme_get_log_args args = {
.lpo = 0,
@ -81,7 +50,7 @@ static int get_supported_log_pages_log(struct nvme_dev *dev, int uuid_index,
.args_size = sizeof(args),
.fd = dev_fd(dev),
.timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
.lid = LID,
.lid = NVME_LOG_LID_SUPPORTED_LOG_PAGES,
.len = sizeof(*supported),
.nsid = NVME_NSID_ALL,
.csi = NVME_CSI_NVM,
@ -101,8 +70,8 @@ static struct lid_dir *get_standard_lids(struct nvme_supported_log_pages *suppor
init_lid_dir(&standard_dir);
for (int lid = 0; lid < NVME_LOG_SUPPORTED_LOG_PAGES_MAX; lid++) {
if (!supported->lid_support[lid] || lid >= MIN_VENDOR_LID)
for (int lid = 0; lid < MIN_VENDOR_LID; lid++) {
if (!supported->lid_support[lid])
continue;
standard_dir.lid[lid].supported = true;
@ -128,12 +97,15 @@ static struct lid_dir *get_solidigm_lids(struct nvme_supported_log_pages *suppor
static struct lid_dir solidigm_dir = { 0 };
init_lid_dir(&solidigm_dir);
solidigm_dir.lid[0xC0].str = "OCP SMART / Health Information Extended";
solidigm_dir.lid[0xC1].str = "Read Commands Latency Statistics";
solidigm_dir.lid[0xC2].str = "Write Commands Latency Statistics";
solidigm_dir.lid[0xC3].str = "OCP Latency Monitor";
solidigm_dir.lid[0xC4].str = "Endurance Manager Statistics";
solidigm_dir.lid[0xC5].str = "Temperature Statistics";
solidigm_dir.lid[0xCA].str = "SMART Attributes";
solidigm_dir.lid[0xCB].str = "VU NVMe IO Queue Metrics Log Page";
solidigm_dir.lid[0xD5].str = solidigm_dir.lid[0xC5].str;
solidigm_dir.lid[0xDD].str = "VU Marketing Description Log Page";
solidigm_dir.lid[0xEF].str = "Performance Rating and LBA Access Histogram";
solidigm_dir.lid[0xF2].str = "Get Power Usage Log Page";
@ -222,7 +194,7 @@ int solidigm_get_log_page_directory_log(int argc, char **argv, struct command *c
OPT_END()
};
struct nvme_dev *dev = NULL;
_cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
int err = parse_and_open(&dev, argc, argv, description, options);
if (err)
@ -247,16 +219,21 @@ int solidigm_get_log_page_directory_log(int argc, char **argv, struct command *c
lid_dirs[NO_UUID_INDEX]->lid[lid] = solidigm_lid_dir->lid[lid];
}
} else {
for (int uuid_index = 1; uuid_index <= SOLIDIGM_MAX_UUID; uuid_index++) {
if (is_invalid_uuid(uuid_list.entry[uuid_index - 1]))
break;
else if (get_supported_log_pages_log(dev, uuid_index, &supported))
continue;
__u8 sldgm_idx;
__u8 ocp_idx;
if (is_solidigm_uuid(uuid_list.entry[uuid_index - 1]))
lid_dirs[uuid_index] = get_solidigm_lids(&supported);
else if (is_ocp_uuid(uuid_list.entry[uuid_index - 1]))
lid_dirs[uuid_index] = get_ocp_lids(&supported);
sldgm_find_uuid_index(&uuid_list, &sldgm_idx);
ocp_find_uuid_index(&uuid_list, &ocp_idx);
if (sldgm_idx && (sldgm_idx <= SOLIDIGM_MAX_UUID)) {
err = get_supported_log_pages_log(dev, sldgm_idx, &supported);
if (!err)
lid_dirs[sldgm_idx] = get_solidigm_lids(&supported);
}
if (ocp_idx && (ocp_idx <= SOLIDIGM_MAX_UUID)) {
err = get_supported_log_pages_log(dev, ocp_idx, &supported);
if (!err)
lid_dirs[ocp_idx] = get_ocp_lids(&supported);
}
}
} else {
@ -279,8 +256,5 @@ int solidigm_get_log_page_directory_log(int argc, char **argv, struct command *c
}
}
/* Redundant close() to make static code analysis happy */
close(dev->direct.fd);
dev_close(dev);
return err;
}

View file

@ -13,7 +13,7 @@
#include "cmd.h"
#define SOLIDIGM_PLUGIN_VERSION "1.1"
#define SOLIDIGM_PLUGIN_VERSION "1.2"
PLUGIN(NAME("solidigm", "Solidigm vendor specific extensions", SOLIDIGM_PLUGIN_VERSION),
COMMAND_LIST(

View file

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2022 Solidigm.
* Copyright (c) 2022-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
@ -229,7 +229,7 @@ int solidigm_get_additional_smart_log(int argc, char **argv, struct command *cmd
return err;
}
uuid_index = solidigm_get_vu_uuid_index(dev);
sldgm_get_uuid_index(dev, &uuid_index);
struct nvme_get_log_args args = {
.lpo = 0,

View file

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2023 Solidigm.
* Copyright (c) 2023-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
@ -11,7 +11,8 @@
#include "nvme-print.h"
#include "solidigm-util.h"
#define SLDGM_TEMP_STATS_LID 0xC5
#define SLDGM_LEGACY_TEMP_STATS_LID 0xC5
#define SLDGM_TEMP_STATS_LID 0xD5
struct temp_stats {
__le64 curr;
@ -40,7 +41,7 @@ static void show_temp_stats(struct temp_stats *stats)
int sldgm_get_temp_stats_log(int argc, char **argv, struct command *cmd, struct plugin *plugin)
{
unsigned char buffer[4096] = {0};
struct nvme_dev *dev;
_cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
__u8 uuid_idx;
int err;
@ -63,7 +64,7 @@ int sldgm_get_temp_stats_log(int argc, char **argv, struct command *cmd, struct
if (err)
return err;
uuid_idx = solidigm_get_vu_uuid_index(dev);
sldgm_get_uuid_index(dev, &uuid_idx);
struct nvme_get_log_args args = {
.lpo = 0,
@ -84,25 +85,26 @@ int sldgm_get_temp_stats_log(int argc, char **argv, struct command *cmd, struct
};
err = nvme_get_log(&args);
if (!err) {
uint64_t *guid = (uint64_t *)&buffer[4080];
if (err > 0) {
args.lid = SLDGM_LEGACY_TEMP_STATS_LID;
err = nvme_get_log(&args);
if (!err) {
uint64_t *guid = (uint64_t *)&buffer[4080];
if (guid[1] == 0xC7BB98B7D0324863 && guid[0] == 0xBB2C23990E9C722F) {
fprintf(stderr, "Error: Log page has 'OCP unsupported Requirements' GUID\n");
err = -EBADMSG;
goto closefd;
if (guid[1] == 0xC7BB98B7D0324863 && guid[0] == 0xBB2C23990E9C722F) {
fprintf(stderr,
"Error: Log page has OCP unsupported Requirements GUID\n");
return -EBADMSG;
}
}
}
if (!err) {
if (!cfg.raw_binary)
show_temp_stats((struct temp_stats *) buffer);
else
d_raw(buffer, sizeof(struct temp_stats));
} else if (err > 0) {
} else if (err > 0)
nvme_show_status(err);
}
closefd:
/* Redundant close() to make static code analysis happy */
close(dev->direct.fd);
dev_close(dev);
return err;
}

View file

@ -1,20 +1,39 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2023 Solidigm.
* Copyright (c) 2023-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
#include "plugins/ocp/ocp-utils.h"
#include <errno.h>
#include "solidigm-util.h"
__u8 solidigm_get_vu_uuid_index(struct nvme_dev *dev)
{
int ocp_uuid_index = 0;
const unsigned char solidigm_uuid[NVME_UUID_LEN] = {
0x96, 0x19, 0x58, 0x6e, 0xc1, 0x1b, 0x43, 0xad,
0xaa, 0xaa, 0x65, 0x41, 0x87, 0xf6, 0xbb, 0xb2
};
if (ocp_get_uuid_index(dev, &ocp_uuid_index) == 0)
if (ocp_uuid_index == 2)
return 1;
int sldgm_find_uuid_index(struct nvme_id_uuid_list *uuid_list, __u8 *index)
{
int i = nvme_uuid_find(uuid_list, solidigm_uuid);
*index = 0;
if (i > 0)
*index = i;
else
return -errno;
return 0;
}
int sldgm_get_uuid_index(struct nvme_dev *dev, __u8 *index)
{
struct nvme_id_uuid_list uuid_list;
int err = nvme_identify_uuid(dev_fd(dev), &uuid_list);
*index = 0;
if (err)
return err;
return sldgm_find_uuid_index(&uuid_list, index);
}

View file

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2023 Solidigm.
* Copyright (c) 2023-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
@ -9,4 +9,5 @@
#define DRIVER_MAX_TX_256K (256 * 1024)
__u8 solidigm_get_vu_uuid_index(struct nvme_dev *dev);
int sldgm_find_uuid_index(struct nvme_id_uuid_list *uuid_list, __u8 *index);
int sldgm_get_uuid_index(struct nvme_dev *dev, __u8 *index);

430
plugins/ssstc/ssstc-nvme.c Normal file
View file

@ -0,0 +1,430 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <inttypes.h>
#include "common.h"
#include "nvme.h"
#include "libnvme.h"
#include "plugin.h"
#include "linux/types.h"
#include "nvme-print.h"
#define CREATE_CMD
#include "ssstc-nvme.h"
struct __packed nvme_additional_smart_log_item
{
__u8 key;
__u8 norm;
union __packed {
__u8 raw[6];
struct __packed wear_level
{
__le16 min;
__le16 max;
__le16 avg;
} wear_level;
};
__u8 _rp[2];
};
struct nvme_additional_smart_log {
struct nvme_additional_smart_log_item program_fail_cnt;
struct nvme_additional_smart_log_item erase_fail_cnt;
struct nvme_additional_smart_log_item wear_leveling_cnt;
struct nvme_additional_smart_log_item e2e_err_cnt;
struct nvme_additional_smart_log_item crc_err_cnt;
struct nvme_additional_smart_log_item nand_bytes_written;
struct nvme_additional_smart_log_item host_bytes_written;
struct nvme_additional_smart_log_item reallocated_sector_count;
struct nvme_additional_smart_log_item uncorrectable_sector_count;
struct nvme_additional_smart_log_item NAND_ECC_Detection_Count;
struct nvme_additional_smart_log_item NAND_ECC_Correction_Count;
struct nvme_additional_smart_log_item Bad_Block_Failure_Rate;
struct nvme_additional_smart_log_item GC_Count;
struct nvme_additional_smart_log_item DRAM_UECC_Detection_Count;
struct nvme_additional_smart_log_item SRAM_UECC_Detection_Count;
struct nvme_additional_smart_log_item Raid_Recovery_Fail_Count;
struct nvme_additional_smart_log_item Inflight_Command;
struct nvme_additional_smart_log_item Internal_End_to_End_Dect_Count;
struct nvme_additional_smart_log_item PCIe_Correctable_Error_Count;
struct nvme_additional_smart_log_item die_fail_count;
struct nvme_additional_smart_log_item wear_leveling_exec_count;
struct nvme_additional_smart_log_item read_disturb_count;
struct nvme_additional_smart_log_item data_retention_count;
};
static
void show_ssstc_add_smart_log_jsn(struct nvme_additional_smart_log *smart,
unsigned int nsid, const char *devname)
{
struct json_object *root, *entry_stats, *dev_stats, *multi;
__uint16_t wear_level_min = 0;
__uint16_t wear_level_max = 0;
__uint16_t wear_level_avg = 0;
uint64_t raw_val = 0;
root = json_create_object();
json_object_add_value_string(root, "SSSTC Smart log", devname);
dev_stats = json_create_object();
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->program_fail_cnt.key);
json_object_add_value_int(entry_stats, "normalized", smart->program_fail_cnt.norm);
raw_val = int48_to_long(smart->program_fail_cnt.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "program_fail_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->erase_fail_cnt.key);
json_object_add_value_int(entry_stats, "normalized", smart->erase_fail_cnt.norm);
raw_val = int48_to_long(smart->erase_fail_cnt.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "erase_fail_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->wear_leveling_cnt.key);
json_object_add_value_int(entry_stats, "normalized", smart->wear_leveling_cnt.norm);
multi = json_create_object();
wear_level_min = le16_to_cpu(smart->wear_leveling_cnt.wear_level.min);
wear_level_max = le16_to_cpu(smart->wear_leveling_cnt.wear_level.max);
wear_level_avg = le16_to_cpu(smart->wear_leveling_cnt.wear_level.avg);
json_object_add_value_int(multi, "min", wear_level_min);
json_object_add_value_int(multi, "max", wear_level_max);
json_object_add_value_int(multi, "avg", wear_level_avg);
json_object_add_value_object(entry_stats, "raw", multi);
json_object_add_value_object(dev_stats, "wear_leveling", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->e2e_err_cnt.key);
json_object_add_value_int(entry_stats, "normalized", smart->e2e_err_cnt.norm);
multi = json_create_object();
wear_level_min = le16_to_cpu(smart->e2e_err_cnt.wear_level.min);
wear_level_max = le16_to_cpu(smart->e2e_err_cnt.wear_level.max);
wear_level_avg = le16_to_cpu(smart->e2e_err_cnt.wear_level.avg);
json_object_add_value_int(multi, "guard check error", wear_level_min);
json_object_add_value_int(multi, "application tag check error", wear_level_max);
json_object_add_value_int(multi, "reference tag check error", wear_level_avg);
json_object_add_value_object(entry_stats, "raw", multi);
json_object_add_value_object(dev_stats, "end_to_end_error_dect_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->crc_err_cnt.key);
json_object_add_value_int(entry_stats, "normalized", smart->crc_err_cnt.norm);
raw_val = int48_to_long(smart->crc_err_cnt.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "crc_error_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->nand_bytes_written.key);
json_object_add_value_int(entry_stats, "normalized", smart->nand_bytes_written.norm);
raw_val = int48_to_long(smart->nand_bytes_written.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "nand_bytes_written", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->host_bytes_written.key);
json_object_add_value_int(entry_stats, "normalized", smart->host_bytes_written.norm);
raw_val = int48_to_long(smart->host_bytes_written.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "host_bytes_written", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->reallocated_sector_count.key);
json_object_add_value_int(entry_stats, "normalized", smart->reallocated_sector_count.norm);
raw_val = int48_to_long(smart->reallocated_sector_count.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "reallocated_sector_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->uncorrectable_sector_count.key);
json_object_add_value_int(entry_stats, "normalized",
smart->uncorrectable_sector_count.norm);
raw_val = int48_to_long(smart->uncorrectable_sector_count.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "uncorrectable_sector_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->NAND_ECC_Detection_Count.key);
json_object_add_value_int(entry_stats, "normalized", smart->NAND_ECC_Detection_Count.norm);
raw_val = int48_to_long(smart->NAND_ECC_Detection_Count.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "NAND_ECC_detection_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->NAND_ECC_Correction_Count.key);
json_object_add_value_int(entry_stats, "normalized", smart->NAND_ECC_Correction_Count.norm);
raw_val = int48_to_long(smart->NAND_ECC_Correction_Count.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "NAND_ECC_correction_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->GC_Count.key);
json_object_add_value_int(entry_stats, "normalized", smart->GC_Count.norm);
raw_val = int48_to_long(smart->GC_Count.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "GC_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->DRAM_UECC_Detection_Count.key);
json_object_add_value_int(entry_stats, "normalized", smart->DRAM_UECC_Detection_Count.norm);
multi = json_create_object();
wear_level_max = le16_to_cpu(smart->DRAM_UECC_Detection_Count.wear_level.max);
wear_level_avg = le16_to_cpu(smart->DRAM_UECC_Detection_Count.wear_level.avg);
json_object_add_value_int(multi, "1-Bit Err", wear_level_max);
json_object_add_value_int(multi, "2-Bit Err", wear_level_avg);
json_object_add_value_object(entry_stats, "raw", multi);
json_object_add_value_object(dev_stats, "DRAM_UECC_detection_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->SRAM_UECC_Detection_Count.key);
json_object_add_value_int(entry_stats, "normalized", smart->SRAM_UECC_Detection_Count.norm);
multi = json_create_object();
wear_level_min = le16_to_cpu(smart->SRAM_UECC_Detection_Count.wear_level.min);
wear_level_max = le16_to_cpu(smart->SRAM_UECC_Detection_Count.wear_level.max);
wear_level_avg = le16_to_cpu(smart->SRAM_UECC_Detection_Count.wear_level.avg);
json_object_add_value_int(multi, "parity error detected", wear_level_min);
json_object_add_value_int(multi, "ecc error detection", wear_level_max);
json_object_add_value_int(multi, "axi data parity errors", wear_level_avg);
json_object_add_value_object(entry_stats, "raw", multi);
json_object_add_value_object(dev_stats, "SRAM_UECC_Detection_Count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->Raid_Recovery_Fail_Count.key);
json_object_add_value_int(entry_stats, "normalized", smart->Raid_Recovery_Fail_Count.norm);
raw_val = int48_to_long(smart->Raid_Recovery_Fail_Count.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "raid_Recovery_fail_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->Inflight_Command.key);
json_object_add_value_int(entry_stats, "normalized", smart->Inflight_Command.norm);
multi = json_create_object();
wear_level_min = le16_to_cpu(smart->Inflight_Command.wear_level.min);
wear_level_max = le16_to_cpu(smart->Inflight_Command.wear_level.max);
wear_level_avg = le16_to_cpu(smart->Inflight_Command.wear_level.avg);
json_object_add_value_int(multi, "Read Cmd", wear_level_min);
json_object_add_value_int(multi, "Write Cmd", wear_level_max);
json_object_add_value_int(multi, "Admin Cmd", wear_level_avg);
json_object_add_value_object(entry_stats, "raw", multi);
json_object_add_value_object(dev_stats, "Inflight_Command", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->Internal_End_to_End_Dect_Count.key);
json_object_add_value_int(entry_stats, "normalized", 100);
multi = json_create_object();
wear_level_min = le16_to_cpu(smart->Internal_End_to_End_Dect_Count.wear_level.min);
wear_level_max = le16_to_cpu(smart->Internal_End_to_End_Dect_Count.wear_level.max);
wear_level_avg = le16_to_cpu(smart->Internal_End_to_End_Dect_Count.wear_level.avg);
json_object_add_value_int(multi, "read hcrc", wear_level_min);
json_object_add_value_int(multi, "write hcrc", wear_level_max);
json_object_add_value_int(multi, "reserved", wear_level_avg);
json_object_add_value_object(entry_stats, "raw", multi);
json_object_add_value_object(dev_stats, "internal_end_to_end_dect_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->die_fail_count.key);
json_object_add_value_int(entry_stats, "normalized", smart->die_fail_count.norm);
raw_val = int48_to_long(smart->die_fail_count.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "die_fail_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->wear_leveling_exec_count.key);
json_object_add_value_int(entry_stats, "normalized", smart->wear_leveling_exec_count.norm);
raw_val = int48_to_long(smart->wear_leveling_exec_count.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "wear_leveling_exec_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->read_disturb_count.key);
json_object_add_value_int(entry_stats, "normalized", smart->read_disturb_count.norm);
raw_val = int48_to_long(smart->read_disturb_count.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "read_disturb_count", entry_stats);
entry_stats = json_create_object();
json_object_add_value_int(entry_stats, "#id", smart->data_retention_count.key);
json_object_add_value_int(entry_stats, "normalized", smart->data_retention_count.norm);
raw_val = int48_to_long(smart->data_retention_count.raw);
json_object_add_value_int(entry_stats, "raw", raw_val);
json_object_add_value_object(dev_stats, "data_retention_count", entry_stats);
json_object_add_value_object(root, "Device stats", dev_stats);
json_print_object(root, NULL);
json_free_object(root);
}
static
void show_ssstc_add_smart_log(struct nvme_additional_smart_log *smart,
unsigned int nsid, const char *devname)
{
printf("Additional Smart Log for NVME device:%s namespace-id:%x\n",
devname, nsid);
printf("key #id normalized raw\n");
printf("program_fail_count : %03d %3d%% %"PRIu64"\n",
smart->program_fail_cnt.key,
smart->program_fail_cnt.norm,
int48_to_long(smart->program_fail_cnt.raw));
printf("erase_fail_count : %03d %3d%% %"PRIu64"\n",
smart->erase_fail_cnt.key,
smart->erase_fail_cnt.norm,
int48_to_long(smart->erase_fail_cnt.raw));
printf("wear_leveling : %03d %3d%% min: %u, max: %u, avg: %u\n",
smart->wear_leveling_cnt.key,
smart->wear_leveling_cnt.norm,
le16_to_cpu(smart->wear_leveling_cnt.wear_level.min),
le16_to_cpu(smart->wear_leveling_cnt.wear_level.max),
le16_to_cpu(smart->wear_leveling_cnt.wear_level.avg));
printf("end_to_end_error_dect_count : %03d %3d%% "
"guard check error: %u, "
"application tag check error: %u, "
"reference tag check error: %u\n",
smart->e2e_err_cnt.key,
smart->e2e_err_cnt.norm,
le16_to_cpu(smart->e2e_err_cnt.wear_level.min),
le16_to_cpu(smart->e2e_err_cnt.wear_level.max),
le16_to_cpu(smart->e2e_err_cnt.wear_level.avg));
printf("crc_error_count : %03d %3d%% %"PRIu64"\n",
smart->crc_err_cnt.key,
smart->crc_err_cnt.norm,
int48_to_long(smart->crc_err_cnt.raw));
printf("nand_bytes_written : %03d %3d%% sectors: %"PRIu64"\n",
smart->nand_bytes_written.key,
smart->nand_bytes_written.norm,
int48_to_long(smart->nand_bytes_written.raw));
printf("host_bytes_written : %3d %3d%% sectors: %"PRIu64"\n",
smart->host_bytes_written.key,
smart->host_bytes_written.norm,
int48_to_long(smart->host_bytes_written.raw));
printf("reallocated_sector_count : %03d %3d%% %"PRIu64"\n",
smart->reallocated_sector_count.key,
smart->reallocated_sector_count.norm,
int48_to_long(smart->reallocated_sector_count.raw));
printf("uncorrectable_sector_count : %03d %3d%% %"PRIu64"\n",
smart->uncorrectable_sector_count.key,
smart->uncorrectable_sector_count.norm,
int48_to_long(smart->uncorrectable_sector_count.raw));
printf("NAND_ECC_detection_count : %03d %3d%% %"PRIu64"\n",
smart->NAND_ECC_Detection_Count.key,
smart->NAND_ECC_Detection_Count.norm,
int48_to_long(smart->NAND_ECC_Detection_Count.raw));
printf("NAND_ECC_correction_count : %03d %3d%% %"PRIu64"\n",
smart->NAND_ECC_Correction_Count.key,
smart->NAND_ECC_Correction_Count.norm,
int48_to_long(smart->NAND_ECC_Correction_Count.raw));
printf("GC_count : %03d %3d%% %"PRIu64"\n",
smart->GC_Count.key,
smart->GC_Count.norm,
int48_to_long(smart->GC_Count.raw));
printf("DRAM_UECC_detection_count : %03d %3d%% 1-Bit Err: %u, 2-Bit Err: %u\n",
smart->DRAM_UECC_Detection_Count.key,
smart->DRAM_UECC_Detection_Count.norm,
le16_to_cpu(smart->DRAM_UECC_Detection_Count.wear_level.max),
le16_to_cpu(smart->DRAM_UECC_Detection_Count.wear_level.avg));
printf("SRAM_UECC_Detection_Count : %03d %3d%% "
"parity error detected: %u, "
"ecc error detection: %u, "
"axi data parity errors: %u\n",
smart->SRAM_UECC_Detection_Count.key,
smart->SRAM_UECC_Detection_Count.norm,
le16_to_cpu(smart->SRAM_UECC_Detection_Count.wear_level.min),
le16_to_cpu(smart->SRAM_UECC_Detection_Count.wear_level.max),
le16_to_cpu(smart->SRAM_UECC_Detection_Count.wear_level.avg));
printf("raid_recovery_fail_count : %03d %3d%% %"PRIu64"\n",
smart->Raid_Recovery_Fail_Count.key,
smart->Raid_Recovery_Fail_Count.norm,
int48_to_long(smart->Raid_Recovery_Fail_Count.raw));
printf("Inflight_Command : %03d %3d%% "
"Read Cmd: %u, Write Cmd: %u, Admin Cmd: %u\n",
smart->Inflight_Command.key,
smart->Inflight_Command.norm,
le16_to_cpu(smart->Inflight_Command.wear_level.min),
le16_to_cpu(smart->Inflight_Command.wear_level.max),
le16_to_cpu(smart->Inflight_Command.wear_level.avg));
printf("internal_end_to_end_dect_count : %03d %3d%% "
"read hcrc: %u, write hcrc: %u, reserved: %u\n",
smart->Internal_End_to_End_Dect_Count.key,
100,
le16_to_cpu(smart->Internal_End_to_End_Dect_Count.wear_level.min),
le16_to_cpu(smart->Internal_End_to_End_Dect_Count.wear_level.max),
le16_to_cpu(smart->Internal_End_to_End_Dect_Count.wear_level.avg));
printf("die_fail_count : %03d %3d%% %"PRIu64"\n",
smart->die_fail_count.key,
smart->die_fail_count.norm,
int48_to_long(smart->die_fail_count.raw));
printf("wear_leveling_exec_count : %03d %3d%% %"PRIu64"\n",
smart->wear_leveling_exec_count.key,
smart->wear_leveling_exec_count.norm,
int48_to_long(smart->wear_leveling_exec_count.raw));
printf("read_disturb_count : %03d %3d%% %"PRIu64"\n",
smart->read_disturb_count.key,
smart->read_disturb_count.norm,
int48_to_long(smart->read_disturb_count.raw));
printf("data_retention_count : %03d %3d%% %"PRIu64"\n",
smart->data_retention_count.key,
smart->data_retention_count.norm,
int48_to_long(smart->data_retention_count.raw));
}
/*
 * Command handler for "ssstc smart-log-add": fetch the vendor-specific
 * additional SMART log (log id 0xCA) from the controller and display it
 * as a table (default), JSON (-j), or raw binary (-b).
 *
 * Returns 0 on success, a positive NVMe status code on device error,
 * or a negative errno-style value on local failure.
 */
static
int ssstc_get_add_smart_log(int argc, char **argv, struct command *cmd, struct plugin *plugin)
{
	const char *desc =
		"Get SSSTC vendor specific additional smart log\n"
		"(optionally, for the specified namespace), and show it.";
	const char *namespace = "(optional) desired namespace";
	const char *raw = "Dump output in binary format";
	const char *json = "Dump output in json format";
	struct nvme_additional_smart_log smart_log_add;
	struct nvme_dev *dev;
	int err;

	/* command-line options parsed by argconfig via OPT_ARGS below */
	struct config {
		__u32 namespace_id;
		bool raw_binary;
		bool json;
	};
	struct config cfg = {
		.namespace_id = NVME_NSID_ALL,
	};
	OPT_ARGS(opts) = {
		OPT_UINT("namespace-id", 'n', &cfg.namespace_id, namespace),
		OPT_FLAG("raw-binary", 'b', &cfg.raw_binary, raw),
		OPT_FLAG("json", 'j', &cfg.json, json),
		OPT_END()
	};

	err = parse_and_open(&dev, argc, argv, desc, opts);
	if (err)
		return err;

	/* NOTE(review): the log is fetched controller-wide; namespace_id is
	 * only echoed in the table header, not sent to the device — confirm
	 * that is intended. */
	err = nvme_get_log_simple(dev_fd(dev), 0xca, sizeof(smart_log_add),
				  &smart_log_add);
	if (!err) {
		if (cfg.json)
			show_ssstc_add_smart_log_jsn(&smart_log_add, cfg.namespace_id,
						     dev->name);
		else if (!cfg.raw_binary)
			show_ssstc_add_smart_log(&smart_log_add, cfg.namespace_id,
						 dev->name);
		else
			d_raw((unsigned char *)&smart_log_add, sizeof(smart_log_add));
	} else if (err > 0) {
		/* positive values are NVMe status codes from the device */
		nvme_show_status(err);
	}
	/* NOTE(review): negative (local) errors are returned silently with no
	 * message printed — consider reporting them to stderr. */
	dev_close(dev);
	return err;
}

View file

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#undef CMD_INC_FILE
#define CMD_INC_FILE plugins/ssstc/ssstc-nvme
#if !defined(SSSTC_NVME) || defined(CMD_HEADER_MULTI_READ)
#define SSSTC_NVME
#include "cmd.h"
PLUGIN(NAME("ssstc", "SSSTC vendor specific extensions", NVME_VERSION),
COMMAND_LIST(
ENTRY("smart-log-add", "Retrieve ssstc SMART Log, show it", ssstc_get_add_smart_log)
)
);
#endif
#include "define_cmd.h"

View file

@ -1383,6 +1383,11 @@ struct __packed wdc_fw_act_history_log_format_c2 {
__u8 log_page_guid[WDC_C2_GUID_LENGTH];
};
static __u8 ocp_C2_guid[WDC_C2_GUID_LENGTH] = {
0x6D, 0x79, 0x9A, 0x76, 0xB4, 0xDA, 0xF6, 0xA3,
0xE2, 0x4D, 0xB2, 0x8A, 0xAC, 0xF3, 0x1C, 0xD1
};
#define WDC_OCP_C4_GUID_LENGTH 16
#define WDC_DEV_CAP_LOG_BUF_LEN 4096
#define WDC_DEV_CAP_LOG_ID 0xC4
@ -1726,7 +1731,6 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
case WDC_NVME_VID_2:
switch (read_device_id) {
case WDC_NVME_SN630_DEV_ID:
fallthrough;
case WDC_NVME_SN630_DEV_ID_1:
capabilities = (WDC_DRIVE_CAP_CAP_DIAG | WDC_DRIVE_CAP_INTERNAL_LOG |
WDC_DRIVE_CAP_DRIVE_STATUS | WDC_DRIVE_CAP_CLEAR_ASSERT |
@ -1743,19 +1747,12 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
break;
case WDC_NVME_SN640_DEV_ID:
fallthrough;
case WDC_NVME_SN640_DEV_ID_1:
fallthrough;
case WDC_NVME_SN640_DEV_ID_2:
fallthrough;
case WDC_NVME_SN640_DEV_ID_3:
fallthrough;
case WDC_NVME_SN560_DEV_ID_1:
fallthrough;
case WDC_NVME_SN560_DEV_ID_2:
fallthrough;
case WDC_NVME_SN560_DEV_ID_3:
fallthrough;
case WDC_NVME_SN660_DEV_ID:
/* verify the 0xC0 log page is supported */
if (wdc_nvme_check_supported_log_page(r, dev,
@ -1816,9 +1813,7 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
break;
case WDC_NVME_SN840_DEV_ID:
fallthrough;
case WDC_NVME_SN840_DEV_ID_1:
fallthrough;
case WDC_NVME_SN860_DEV_ID:
/* verify the 0xC0 log page is supported */
if (wdc_nvme_check_supported_log_page(r, dev,
@ -1826,7 +1821,6 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
capabilities |= WDC_DRIVE_CAP_C0_LOG_PAGE;
fallthrough;
case WDC_NVME_ZN540_DEV_ID:
fallthrough;
case WDC_NVME_SN540_DEV_ID:
capabilities |= (WDC_DRIVE_CAP_CAP_DIAG | WDC_DRIVE_CAP_INTERNAL_LOG |
WDC_DRIVE_CAP_DRIVE_STATUS | WDC_DRIVE_CAP_CLEAR_ASSERT |
@ -1847,17 +1841,11 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
break;
case WDC_NVME_SN650_DEV_ID:
fallthrough;
case WDC_NVME_SN650_DEV_ID_1:
fallthrough;
case WDC_NVME_SN650_DEV_ID_2:
fallthrough;
case WDC_NVME_SN650_DEV_ID_3:
fallthrough;
case WDC_NVME_SN650_DEV_ID_4:
fallthrough;
case WDC_NVME_SN655_DEV_ID:
fallthrough;
case WDC_NVME_SN550_DEV_ID:
/* verify the 0xC0 log page is supported */
if (wdc_nvme_check_supported_log_page(r, dev,
@ -1907,7 +1895,6 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
break;
case WDC_NVME_SN861_DEV_ID:
fallthrough;
case WDC_NVME_SN861_DEV_ID_1:
capabilities |= (WDC_DRIVE_CAP_C0_LOG_PAGE |
WDC_DRIVE_CAP_C3_LOG_PAGE |
@ -1921,6 +1908,7 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
WDC_DRIVE_CAP_INFO |
WDC_DRIVE_CAP_CLOUD_SSD_VERSION |
WDC_DRIVE_CAP_LOG_PAGE_DIR |
WDC_DRIVE_CAP_DRIVE_STATUS |
WDC_DRIVE_CAP_SET_LATENCY_MONITOR);
break;
@ -1936,11 +1924,8 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
break;
case WDC_NVME_SN520_DEV_ID:
fallthrough;
case WDC_NVME_SN520_DEV_ID_1:
fallthrough;
case WDC_NVME_SN520_DEV_ID_2:
fallthrough;
case WDC_NVME_SN810_DEV_ID:
capabilities = WDC_DRIVE_CAP_DUI_DATA;
break;
@ -2010,19 +1995,14 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
case WDC_NVME_SN8000S_DEV_ID:
fallthrough;
case WDC_NVME_SN740_DEV_ID:
fallthrough;
case WDC_NVME_SN740_DEV_ID_1:
fallthrough;
case WDC_NVME_SN740_DEV_ID_2:
fallthrough;
case WDC_NVME_SN740_DEV_ID_3:
fallthrough;
case WDC_NVME_SN340_DEV_ID:
capabilities = WDC_DRIVE_CAP_DUI;
break;
case WDC_NVME_ZN350_DEV_ID:
fallthrough;
case WDC_NVME_ZN350_DEV_ID_1:
capabilities = WDC_DRIVE_CAP_DUI_DATA | WDC_DRIVE_CAP_VU_FID_CLEAR_PCIE |
WDC_DRIVE_CAP_C0_LOG_PAGE |
@ -2450,23 +2430,32 @@ static bool get_dev_mgment_cbs_data(nvme_root_t r, struct nvme_dev *dev,
uuid_index = index + 1;
}
if (!uuid_index && needs_c2_log_page_check(device_id)) {
/* In certain devices that don't support UUID lists, there are multiple
* definitions of the C2 logpage. In those cases, the code
* needs to try two UUID indexes and use an identification algorithm
* to determine which is returning the correct log page data.
*/
uuid_ix = 1;
}
found = get_dev_mgmt_log_page_lid_data(dev, cbs_data, lid, log_id, uuid_ix);
if (!found) {
/* not found with uuid = 1 try with uuid = 0 */
uuid_ix = 0;
fprintf(stderr, "Not found, requesting log page with uuid_index %d\n", uuid_index);
if (uuid_present) {
/* use the uuid index found above */
found = get_dev_mgmt_log_page_lid_data(dev, cbs_data, lid, log_id, uuid_index);
} else if (device_id == WDC_NVME_ZN350_DEV_ID || device_id == WDC_NVME_ZN350_DEV_ID_1) {
uuid_index = 0;
found = get_dev_mgmt_log_page_lid_data(dev, cbs_data, lid, log_id, uuid_index);
} else {
if (!uuid_index && needs_c2_log_page_check(device_id)) {
/* In certain devices that don't support UUID lists, there are multiple
* definitions of the C2 logpage. In those cases, the code
* needs to try two UUID indexes and use an identification algorithm
* to determine which is returning the correct log page data.
*/
uuid_ix = 1;
}
found = get_dev_mgmt_log_page_lid_data(dev, cbs_data, lid, log_id, uuid_ix);
if (!found) {
/* not found with uuid = 1 try with uuid = 0 */
uuid_ix = 0;
fprintf(stderr, "Not found, requesting log page with uuid_index %d\n",
uuid_index);
found = get_dev_mgmt_log_page_lid_data(dev, cbs_data, lid, log_id, uuid_ix);
}
}
return found;
@ -5538,17 +5527,18 @@ static void wdc_print_fw_act_history_log_normal(__u8 *data, int num_entries,
char previous_fw[9];
char new_fw[9];
char commit_action_bin[8];
char time_str[11];
char time_str[100];
__u16 oldestEntryIdx = 0, entryIdx = 0;
uint64_t timestamp;
__u64 timestamp_sec;
char *null_fw = "--------";
memset((void *)time_str, 0, 11);
memset((void *)time_str, '\0', 100);
if (data[0] == WDC_NVME_GET_FW_ACT_HISTORY_C2_LOG_ID) {
printf(" Firmware Activate History Log\n");
if (cust_id == WDC_CUSTOMER_ID_0x1005 ||
vendor_id == WDC_NVME_SNDK_VID ||
wdc_is_sn861(device_id)) {
vendor_id == WDC_NVME_SNDK_VID) {
printf(" Power on Hour Power Cycle Previous New\n");
printf(" Entry hh:mm:ss Count Firmware Firmware Slot Action Result\n");
printf(" ----- ----------------- ----------------- --------- --------- ----- ------ -------\n");
@ -5589,48 +5579,33 @@ static void wdc_print_fw_act_history_log_normal(__u8 *data, int num_entries,
memcpy(new_fw, null_fw, 8);
printf("%5"PRIu16"", (uint16_t)le16_to_cpu(fw_act_history_entry->entry[entryIdx].fw_act_hist_entries));
timestamp = (0x0000FFFFFFFFFFFF &
le64_to_cpu(
fw_act_history_entry->entry[entryIdx].timestamp));
timestamp_sec = timestamp / 1000;
if (cust_id == WDC_CUSTOMER_ID_0x1005) {
printf(" ");
memset((void *)time_str, 0, 9);
sprintf((char *)time_str, "%04d:%02d:%02d", (int)(le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp)/3600),
(int)((le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp%3600)/60)),
(int)(le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp%60)));
sprintf((char *)time_str, "%"PRIu32":%u:%u",
(__u32)(timestamp_sec/3600),
(__u8)(timestamp_sec%3600/60),
(__u8)(timestamp_sec%60));
printf("%s", time_str);
printf(" ");
} else if (vendor_id == WDC_NVME_SNDK_VID) {
printf(" ");
uint64_t timestamp = (0x0000FFFFFFFFFFFF & le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp));
memset((void *)time_str, 0, 9);
sprintf((char *)time_str, "%04d:%02d:%02d", (int)((timestamp/(3600*1000))%24), (int)((timestamp/(1000*60))%60),
(int)((timestamp/1000)%60));
sprintf((char *)time_str, "%"PRIu32":%u:%u",
(__u32)((timestamp_sec/3600)%24),
(__u8)((timestamp_sec/60)%60),
(__u8)(timestamp_sec%60));
printf("%s", time_str);
printf(" ");
} else if (wdc_is_sn861(device_id)) {
printf(" ");
char timestamp[20];
__u64 hour;
__u8 min;
__u8 sec;
__u64 timestamp_sec;
timestamp_sec =
le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp)
/ 1000;
hour = timestamp_sec / 3600;
min = (timestamp_sec % 3600) / 60;
sec = timestamp_sec % 60;
sprintf(timestamp,
"%"PRIu64":%02"PRIu8":%02"PRIu8,
(uint64_t)hour, min, sec);
printf("%-11s", timestamp);
printf(" ");
} else {
printf(" ");
uint64_t timestamp = (0x0000FFFFFFFFFFFF & le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp));
printf("%16"PRIu64"", timestamp);
printf(" ");
}
@ -5735,13 +5710,15 @@ static void wdc_print_fw_act_history_log_json(__u8 *data, int num_entries,
char new_fw[9];
char commit_action_bin[8];
char fail_str[32];
char time_str[11];
char time_str[100];
char ext_time_str[20];
uint64_t timestamp;
__u64 timestamp_sec;
memset((void *)previous_fw, 0, 9);
memset((void *)new_fw, 0, 9);
memset((void *)commit_action_bin, 0, 8);
memset((void *)time_str, 0, 11);
memset((void *)time_str, '\0', 100);
memset((void *)ext_time_str, 0, 20);
memset((void *)fail_str, 0, 11);
char *null_fw = "--------";
@ -5781,33 +5758,25 @@ static void wdc_print_fw_act_history_log_json(__u8 *data, int num_entries,
json_object_add_value_int(root, "Entry",
le16_to_cpu(fw_act_history_entry->entry[entryIdx].fw_act_hist_entries));
timestamp = (0x0000FFFFFFFFFFFF &
le64_to_cpu(
fw_act_history_entry->entry[entryIdx].timestamp));
timestamp_sec = timestamp / 1000;
if (cust_id == WDC_CUSTOMER_ID_0x1005) {
sprintf((char *)time_str, "%04d:%02d:%02d", (int)(le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp)/3600),
(int)((le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp%3600)/60)),
(int)(le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp%60)));
sprintf((char *)time_str, "%"PRIu32":%u:%u",
(__u32)(timestamp_sec/3600),
(__u8)(timestamp_sec%3600/60),
(__u8)(timestamp_sec%60));
json_object_add_value_string(root, "Power on Hour", time_str);
} else if (vendor_id == WDC_NVME_SNDK_VID) {
uint64_t timestamp = (0x0000FFFFFFFFFFFF & le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp));
sprintf((char *)time_str, "%04d:%02d:%02d", (int)((timestamp/(3600*1000))%24), (int)((timestamp/(1000*60))%60),
(int)((timestamp/1000)%60));
sprintf((char *)time_str, "%"PRIu32":%u:%u",
(__u32)((timestamp_sec/3600)%24),
(__u8)((timestamp_sec/60)%60),
(__u8)(timestamp_sec%60));
json_object_add_value_string(root, "Power on Hour", time_str);
} else if (wdc_is_sn861(device_id)) {
__u64 timestamp_sec =
le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp)
/ 1000;
sprintf((char *)ext_time_str,
"%"PRIu64":%02"PRIu8":%02"PRIu8,
(uint64_t)(__u64)(timestamp_sec/3600),
(__u8)((timestamp_sec%3600)/60),
(__u8)(timestamp_sec%60));
json_object_add_value_string(root, "Power on Hour", ext_time_str);
} else {
uint64_t timestamp = (0x0000FFFFFFFFFFFF & le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp));
json_object_add_value_uint64(root, "Timestamp", timestamp);
}
@ -7047,39 +7016,23 @@ static int wdc_get_c0_log_page(nvme_root_t r, struct nvme_dev *dev, char *format
switch (device_id) {
case WDC_NVME_SN640_DEV_ID:
fallthrough;
case WDC_NVME_SN640_DEV_ID_1:
fallthrough;
case WDC_NVME_SN640_DEV_ID_2:
fallthrough;
case WDC_NVME_SN640_DEV_ID_3:
fallthrough;
case WDC_NVME_SN840_DEV_ID:
fallthrough;
case WDC_NVME_SN840_DEV_ID_1:
fallthrough;
case WDC_NVME_SN860_DEV_ID:
fallthrough;
case WDC_NVME_SN560_DEV_ID_1:
fallthrough;
case WDC_NVME_SN560_DEV_ID_2:
fallthrough;
case WDC_NVME_SN560_DEV_ID_3:
fallthrough;
case WDC_NVME_SN550_DEV_ID:
ret = wdc_get_c0_log_page_sn(r, dev, uuid_index, format, namespace_id, fmt);
break;
case WDC_NVME_SN650_DEV_ID:
fallthrough;
case WDC_NVME_SN650_DEV_ID_1:
fallthrough;
case WDC_NVME_SN650_DEV_ID_2:
fallthrough;
case WDC_NVME_SN650_DEV_ID_3:
fallthrough;
case WDC_NVME_SN650_DEV_ID_4:
fallthrough;
case WDC_NVME_SN655_DEV_ID:
if (uuid_index == 0) {
log_id = WDC_NVME_GET_SMART_CLOUD_ATTR_LOG_ID;
@ -7137,9 +7090,7 @@ static int wdc_get_c0_log_page(nvme_root_t r, struct nvme_dev *dev, char *format
}
free(data);
break;
case WDC_NVME_ZN350_DEV_ID:
fallthrough;
case WDC_NVME_ZN350_DEV_ID_1:
data = (__u8 *)malloc(sizeof(__u8) * WDC_NVME_SMART_CLOUD_ATTR_LEN);
if (!data) {
@ -7405,17 +7356,11 @@ static int wdc_get_ca_log_page(nvme_root_t r, struct nvme_dev *dev, char *format
}
break;
case WDC_NVME_SN640_DEV_ID:
fallthrough;
case WDC_NVME_SN640_DEV_ID_1:
fallthrough;
case WDC_NVME_SN640_DEV_ID_2:
fallthrough;
case WDC_NVME_SN640_DEV_ID_3:
fallthrough;
case WDC_NVME_SN840_DEV_ID:
fallthrough;
case WDC_NVME_SN840_DEV_ID_1:
fallthrough;
case WDC_NVME_SN860_DEV_ID:
if (cust_id == WDC_CUSTOMER_ID_0x1005) {
data = (__u8 *)malloc(sizeof(__u8) * WDC_FB_CA_LOG_BUF_LEN);
@ -9074,6 +9019,7 @@ static int wdc_get_fw_act_history_C2(nvme_root_t r, struct nvme_dev *dev,
enum nvme_print_flags fmt;
__u8 *data;
int ret;
bool c2GuidMatch = false;
if (!wdc_check_device(r, dev))
return -1;
@ -9102,29 +9048,40 @@ static int wdc_get_fw_act_history_C2(nvme_root_t r, struct nvme_dev *dev,
nvme_show_status(ret);
if (!ret) {
/* parse the data */
/* Get the log page data and verify the GUID */
fw_act_history_log = (struct wdc_fw_act_history_log_format_c2 *)(data);
tot_entries = le32_to_cpu(fw_act_history_log->num_entries);
if (tot_entries > 0) {
/* get the FW customer id */
if (!wdc_is_sn861(device_id)) {
cust_id = wdc_get_fw_cust_id(r, dev);
if (cust_id == WDC_INVALID_CUSTOMER_ID) {
fprintf(stderr,
"%s: ERROR: WDC: invalid customer id\n",
__func__);
ret = -1;
goto freeData;
c2GuidMatch = !memcmp(ocp_C2_guid,
fw_act_history_log->log_page_guid,
WDC_C2_GUID_LENGTH);
if (c2GuidMatch) {
/* parse the data */
tot_entries = le32_to_cpu(fw_act_history_log->num_entries);
if (tot_entries > 0) {
/* get the FW customer id */
if (!wdc_is_sn861(device_id)) {
cust_id = wdc_get_fw_cust_id(r, dev);
if (cust_id == WDC_INVALID_CUSTOMER_ID) {
fprintf(stderr,
"%s: ERROR: WDC: invalid customer id\n",
__func__);
ret = -1;
goto freeData;
}
}
num_entries = (tot_entries < WDC_MAX_NUM_ACT_HIST_ENTRIES) ?
tot_entries : WDC_MAX_NUM_ACT_HIST_ENTRIES;
ret = wdc_print_fw_act_history_log(data, num_entries,
fmt, cust_id, vendor_id, device_id);
} else {
fprintf(stderr, "INFO: WDC: No entries found.\n");
ret = 0;
}
num_entries = (tot_entries < WDC_MAX_NUM_ACT_HIST_ENTRIES) ? tot_entries :
WDC_MAX_NUM_ACT_HIST_ENTRIES;
ret = wdc_print_fw_act_history_log(data, num_entries,
fmt, cust_id, vendor_id, device_id);
} else {
fprintf(stderr, "INFO: WDC: No FW Activate History entries found.\n");
ret = 0;
} else {
fprintf(stderr, "ERROR: WDC: Invalid C2 log page GUID\n");
ret = -1;
}
} else {
fprintf(stderr, "ERROR: WDC: Unable to read FW Activate History Log Page data\n");
@ -9143,7 +9100,7 @@ static int wdc_vs_fw_activate_history(int argc, char **argv, struct command *com
__u64 capabilities = 0;
struct nvme_dev *dev;
nvme_root_t r;
int ret;
int ret = -1;
struct config {
char *output_format;
@ -9171,61 +9128,23 @@ static int wdc_vs_fw_activate_history(int argc, char **argv, struct command *com
}
if (capabilities & WDC_DRIVE_CAP_FW_ACTIVATE_HISTORY) {
int uuid_index = 0;
bool c0GuidMatch = false;
__u8 *data;
int i;
/*
* check for the GUID in the 0xC0 log page to determine which log page to use to
* retrieve fw activate history data
*/
data = (__u8 *)malloc(sizeof(__u8) * WDC_NVME_SMART_CLOUD_ATTR_LEN);
if (!data) {
fprintf(stderr, "ERROR: WDC: malloc: %s\n", strerror(errno));
__u32 cust_fw_id = 0;
/* get the FW customer id */
cust_fw_id = wdc_get_fw_cust_id(r, dev);
if (cust_fw_id == WDC_INVALID_CUSTOMER_ID) {
fprintf(stderr, "%s: ERROR: WDC: invalid customer id\n", __func__);
ret = -1;
goto out;
}
/* Get the 0xC0 log data */
struct nvme_get_log_args args = {
.args_size = sizeof(args),
.fd = dev_fd(dev),
.lid = WDC_NVME_GET_SMART_CLOUD_ATTR_LOG_ID,
.nsid = 0xFFFFFFFF,
.lpo = 0,
.lsp = NVME_LOG_LSP_NONE,
.lsi = 0,
.rae = false,
.uuidx = uuid_index,
.csi = NVME_CSI_NVM,
.ot = false,
.len = WDC_NVME_SMART_CLOUD_ATTR_LEN,
.log = data,
.timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
.result = NULL,
};
ret = nvme_get_log(&args);
if (!ret) {
/* Verify GUID matches */
for (i = 0; i < 16; i++) {
if (scao_guid[i] != data[SCAO_LPG + i]) {
c0GuidMatch = false;
break;
}
}
if (i == 16)
c0GuidMatch = true;
}
free(data);
if (c0GuidMatch)
if ((cust_fw_id == WDC_CUSTOMER_ID_0x1004) ||
(cust_fw_id == WDC_CUSTOMER_ID_0x1008) ||
(cust_fw_id == WDC_CUSTOMER_ID_0x1005) ||
(cust_fw_id == WDC_CUSTOMER_ID_0x1304))
ret = wdc_get_fw_act_history_C2(r, dev, cfg.output_format);
else
ret = wdc_get_fw_act_history(r, dev, cfg.output_format);
} else {
} else if (capabilities & WDC_DRIVE_CAP_FW_ACTIVATE_HISTORY_C2) {
ret = wdc_get_fw_act_history_C2(r, dev, cfg.output_format);
}
@ -9648,10 +9567,10 @@ static int wdc_fetch_log_file_from_device(struct nvme_dev *dev, __u32 fileId,
__u16 spiDestn, __u64 fileSize, __u8 *dataBuffer)
{
int ret = WDC_STATUS_FAILURE;
__u32 chunckSize = WDC_DE_VU_READ_BUFFER_STANDARD_OFFSET;
__u32 maximumTransferLength = 0;
__u32 buffSize = 0;
__u64 offsetIdx = 0;
__u32 chunckSize = WDC_DE_VU_READ_BUFFER_STANDARD_OFFSET;
__u32 maximumTransferLength = 0;
__u32 buffSize = 0;
__u64 offsetIdx = 0;
if (!dev || !dataBuffer || !fileSize) {
ret = WDC_STATUS_INVALID_PARAMETER;
@ -9699,18 +9618,17 @@ end:
static int wdc_de_get_dump_trace(struct nvme_dev *dev, char *filePath, __u16 binFileNameLen, char *binFileName)
{
int ret = WDC_STATUS_FAILURE;
__u8 *readBuffer = NULL;
__u32 readBufferLen = 0;
__u32 lastPktReadBufferLen = 0;
__u32 maxTransferLen = 0;
__u32 dumptraceSize = 0;
__u32 chunkSize = 0;
__u32 chunks = 0;
__u32 offset = 0;
__u8 loop = 0;
__u16 i = 0;
__u32 maximumTransferLength = 0;
int ret = WDC_STATUS_FAILURE;
__u8 *readBuffer = NULL;
__u32 readBufferLen = 0;
__u32 lastPktReadBufferLen = 0;
__u32 maxTransferLen = 0;
__u32 dumptraceSize = 0;
__u32 chunkSize;
__u32 chunks;
__u32 offset;
__u32 i;
__u32 maximumTransferLength = 0;
if (!dev || !binFileName || !filePath) {
ret = WDC_STATUS_INVALID_PARAMETER;
@ -9759,7 +9677,7 @@ static int wdc_de_get_dump_trace(struct nvme_dev *dev, char *filePath, __u16 bin
}
for (i = 0; i < chunks; i++) {
offset = ((i*chunkSize) / 4);
offset = (i * chunkSize) / 4;
/* Last loop call, Assign readBufferLen to read only left over bytes */
if (i == (chunks - 1))
@ -9774,7 +9692,7 @@ static int wdc_de_get_dump_trace(struct nvme_dev *dev, char *filePath, __u16 bin
break;
}
}
} while (loop);
} while (0);
if (ret == WDC_STATUS_SUCCESS) {
ret = wdc_WriteToFile(binFileName, (char *)readBuffer, dumptraceSize);
@ -11678,7 +11596,6 @@ static int wdc_vs_drive_info(int argc, char **argv,
break;
case WDC_NVME_SN861_DEV_ID:
fallthrough;
case WDC_NVME_SN861_DEV_ID_1:
data_len = sizeof(info);
num_dwords = data_len / 4;

View file

@ -5,7 +5,7 @@
#if !defined(WDC_NVME) || defined(CMD_HEADER_MULTI_READ)
#define WDC_NVME
#define WDC_PLUGIN_VERSION "2.7.0"
#define WDC_PLUGIN_VERSION "2.8.1"
#include "cmd.h"
PLUGIN(NAME("wdc", "Western Digital vendor specific extensions", WDC_PLUGIN_VERSION),

View file

@ -192,5 +192,5 @@ bool wdc_CheckUuidListSupport(struct nvme_dev *dev, struct nvme_id_uuid_list *uu
bool wdc_UuidEqual(struct nvme_id_uuid_list_entry *entry1, struct nvme_id_uuid_list_entry *entry2)
{
return !memcmp(entry1, entry2, NVME_UUID_LEN);
return !memcmp(entry1->uuid, entry2->uuid, NVME_UUID_LEN);
}