
Adding upstream version 1.14.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-22 12:57:21 +02:00
parent 515eb29eee
commit 9ae445a706
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
1041 changed files with 6076 additions and 1170 deletions


@@ -27,10 +27,13 @@
struct test_peer;
-typedef int (*rx_test_fn)(struct test_peer *peer, void *buf, size_t len);
+typedef int (*rx_test_fn)(struct test_peer *peer, void *buf, size_t len, int sd);
typedef int (*poll_test_fn)(struct test_peer *peer,
struct pollfd *fds, nfds_t nfds, int timeout);
+#define TEST_PEER_SD_COMMANDS_IDX (0)
+#define TEST_PEER_SD_AEMS_IDX (1)
/* Our fake MCTP "peer".
*
* The terms TX (transmit) and RX (receive) are from the perspective of
@@ -64,17 +67,19 @@ static struct test_peer {
void *poll_data;
/* store sd from socket() setup */
-int sd;
+int sd[2];
} test_peer;
/* ensure tests start from a standard state */
void reset_test_peer(void)
{
-int tmp = test_peer.sd;
+int temp_sd[2] = {test_peer.sd[TEST_PEER_SD_COMMANDS_IDX],
+test_peer.sd[TEST_PEER_SD_AEMS_IDX]};
memset(&test_peer, 0, sizeof(test_peer));
test_peer.tx_buf[0] = NVME_MI_MSGTYPE_NVME;
test_peer.rx_buf[0] = NVME_MI_MSGTYPE_NVME;
-test_peer.sd = tmp;
+memcpy(test_peer.sd, temp_sd, 2*sizeof(*temp_sd));
}
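With the descriptor now a two-entry array, the wrappers below index it by purpose. A hypothetical helper (not part of this change) illustrates the convention:

/* Hypothetical, illustration only: pick the command or AEM descriptor
 * from the test peer's sd[] pair. */
static int test_peer_fd(const struct test_peer *peer, bool aem)
{
	return peer->sd[aem ? TEST_PEER_SD_AEMS_IDX : TEST_PEER_SD_COMMANDS_IDX];
}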
/* calculate MIC of peer-to-libnvme data, expand buf by 4 bytes and insert
@@ -93,18 +98,25 @@ static void test_set_tx_mic(struct test_peer *peer)
peer->tx_buf_len += sizeof(crc_le);
}
-int __wrap_socket(int family, int type, int protocol)
+int __wrap_msg_socket(void)
{
/* we do an open here to give the mi-mctp code something to close() */
-test_peer.sd = open("/dev/null", 0);
-return test_peer.sd;
+test_peer.sd[TEST_PEER_SD_COMMANDS_IDX] = open("/dev/null", 0);
+return test_peer.sd[TEST_PEER_SD_COMMANDS_IDX];
}
+int __wrap_aem_socket(__u8 eid, unsigned int network)
+{
+/* we do an open here to give the mi-mctp code something to close() */
+test_peer.sd[TEST_PEER_SD_AEMS_IDX] = open("/dev/null", 0);
+return test_peer.sd[TEST_PEER_SD_AEMS_IDX];
+}
ssize_t __wrap_sendmsg(int sd, const struct msghdr *hdr, int flags)
{
size_t i, pos;
-assert(sd == test_peer.sd);
+assert(sd == test_peer.sd[TEST_PEER_SD_COMMANDS_IDX]);
test_peer.rx_buf[0] = NVME_MI_MSGTYPE_NVME;
@@ -128,13 +140,23 @@ ssize_t __wrap_recvmsg(int sd, struct msghdr *hdr, int flags)
{
size_t i, pos, len;
-assert(sd == test_peer.sd);
+assert(sd == test_peer.sd[TEST_PEER_SD_COMMANDS_IDX] ||
+sd == test_peer.sd[TEST_PEER_SD_AEMS_IDX]);
+//Check for purge case
+if (flags & MSG_TRUNC)
+return 0;
if (test_peer.tx_fn) {
test_peer.tx_fn_res = test_peer.tx_fn(&test_peer,
test_peer.rx_buf,
-test_peer.rx_buf_len);
+test_peer.rx_buf_len,
+sd);
} else {
+if (sd == test_peer.sd[TEST_PEER_SD_COMMANDS_IDX] && test_peer.tx_buf_len == 0) {
+errno = EAGAIN;
+return -1;
+}
/* set up a few default response fields; caller may have
* initialised the rest of the response */
test_peer.tx_buf[0] = NVME_MI_MSGTYPE_NVME;
@@ -157,6 +179,7 @@ ssize_t __wrap_recvmsg(int sd, struct msghdr *hdr, int flags)
errno = test_peer.tx_errno;
+test_peer.tx_buf_len = 0; //Clear since this is sent
return test_peer.tx_rc ?: (pos - 1);
}
@@ -173,14 +196,14 @@ struct mctp_ioc_tag_ctl;
#ifdef SIOCMCTPALLOCTAG
int test_ioctl_tag(int sd, unsigned long req, struct mctp_ioc_tag_ctl *ctl)
{
-assert(sd == test_peer.sd);
+assert(sd == test_peer.sd[TEST_PEER_SD_COMMANDS_IDX]);
switch (req) {
case SIOCMCTPALLOCTAG:
ctl->tag = 1 | MCTP_TAG_PREALLOC | MCTP_TAG_OWNER;
break;
case SIOCMCTPDROPTAG:
-assert(tag == 1 | MCTP_TAG_PREALLOC | MCTP_TAG_OWNER);
+assert(ctl->tag == (1 | MCTP_TAG_PREALLOC | MCTP_TAG_OWNER));
break;
};
@@ -189,13 +212,14 @@ int test_ioctl_tag(int sd, unsigned long req, struct mctp_ioc_tag_ctl *ctl)
#else
int test_ioctl_tag(int sd, unsigned long req, struct mctp_ioc_tag_ctl *ctl)
{
-assert(sd == test_peer.sd);
+assert(sd == test_peer.sd[TEST_PEER_SD_COMMANDS_IDX]);
return 0;
}
#endif
static struct __mi_mctp_socket_ops ops = {
-__wrap_socket,
+__wrap_msg_socket,
+__wrap_aem_socket,
__wrap_sendmsg,
__wrap_recvmsg,
__wrap_poll,
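The table now carries separate constructors for the command and AEM sockets (the initializer continues beyond the context shown). For reference, the harness routes libnvme's MCTP socket operations through these wrappers via an override hook; a minimal sketch, with the hook name assumed from the existing test setup:

/* Assumed from the existing harness: install the wrapper table before
 * running tests so the MCTP transport calls the fakes above. */
__nvme_mi_mctp_set_ops(&ops);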
@@ -214,7 +238,7 @@ static void test_rx_err(nvme_mi_ep_t ep, struct test_peer *peer)
assert(rc != 0);
}
-static int tx_none(struct test_peer *peer, void *buf, size_t len)
+static int tx_none(struct test_peer *peer, void *buf, size_t len, int sd)
{
return 0;
}
@@ -474,10 +498,12 @@ struct mpr_tx_info {
size_t final_len;
};
-static int tx_mpr(struct test_peer *peer, void *buf, size_t len)
+static int tx_mpr(struct test_peer *peer, void *buf, size_t len, int sd)
{
struct mpr_tx_info *tx_info = peer->tx_data;
+assert(sd == peer->sd[TEST_PEER_SD_COMMANDS_IDX]);
memset(peer->tx_buf, 0, sizeof(peer->tx_buf));
peer->tx_buf[0] = NVME_MI_MSGTYPE_NVME;
peer->tx_buf[1] = test_peer.rx_buf[1] | (NVME_MI_ROR_RSP << 7);
@@ -594,12 +620,14 @@ static int poll_fn_mpr_poll(struct test_peer *peer, struct pollfd *fds,
return 1;
}
-static int tx_fn_mpr_poll(struct test_peer *peer, void *buf, size_t len)
+static int tx_fn_mpr_poll(struct test_peer *peer, void *buf, size_t len, int sd)
{
struct mpr_tx_info *tx_info = peer->tx_data;
struct mpr_poll_info *poll_info = peer->poll_data;
unsigned int mprt;
+assert(sd == peer->sd[TEST_PEER_SD_COMMANDS_IDX]);
memset(peer->tx_buf, 0, sizeof(peer->tx_buf));
peer->tx_buf[0] = NVME_MI_MSGTYPE_NVME;
peer->tx_buf[1] = test_peer.rx_buf[1] | (NVME_MI_ROR_RSP << 7);
@@ -713,6 +741,655 @@ static void test_mpr_mprt_zero(nvme_mi_ep_t ep, struct test_peer *peer)
assert(rc == 0);
}
enum aem_enable_state {
AEM_ES_GET_ENABLED,
AEM_ES_SET_TO_DISABLED,
AEM_ES_ENABLE_SET_ENABLED,
AEM_ES_PROCESS,
AEM_ES_ACK_RESPONSE,
AEM_ES_ACK_RECEIVED
};
enum aem_failure_condition {
AEM_FC_NONE,
AEM_FC_BAD_GET_CONFIG_HEADER_LEN,
AEM_FC_BAD_GET_CONFIG_TOTAL_LEN,
AEM_FC_BAD_GET_CONFIG_BUFFER_LEN,
AEM_FC_BAD_OCC_RSP_HDR_LEN_SYNC,
AEM_FC_BAD_OCC_RSP_TOTAL_LEN_SYNC,
AEM_FC_BAD_OCC_RSP_BUFFER_LEN_SYNC,
AEM_FC_BAD_OCC_RSP_HDR_LEN_AEM,
AEM_FC_BAD_OCC_RSP_TOTAL_LEN_AEM,
AEM_FC_BAD_OCC_RSP_BUFFER_LEN_AEM,
};
struct aem_rcv_enable_fn_data {
enum aem_enable_state state;
enum aem_failure_condition fc;
struct nvme_mi_aem_enabled_map ep_enabled_map;
struct nvme_mi_aem_enabled_map host_enabled_map;
struct nvme_mi_aem_enabled_map aem_during_process_map;
struct nvme_mi_aem_enabled_map ack_events_map;
struct nvme_mi_event *events[256];
int callback_count;
};
static void populate_tx_occ_list(bool aem_not_ack,
struct aem_rcv_enable_fn_data *fn_data, struct nvme_mi_aem_enabled_map *to_send)
{
struct nvme_mi_mi_resp_hdr *resp_hdr =
(struct nvme_mi_mi_resp_hdr *)test_peer.tx_buf;
struct nvme_mi_msg_hdr *mi_msg_hdr =
(struct nvme_mi_msg_hdr *)test_peer.tx_buf;
size_t hdr_len = sizeof(*resp_hdr);
struct nvme_mi_aem_occ_list_hdr *list_hdr =
(struct nvme_mi_aem_occ_list_hdr *)(resp_hdr+1);
//For AEM, the data is actually in request format
//since it originates from the endpoint
if (aem_not_ack) {
list_hdr = (struct nvme_mi_aem_occ_list_hdr *)(mi_msg_hdr+1);
hdr_len = sizeof(*mi_msg_hdr);
mi_msg_hdr->nmp = (NVME_MI_MT_AE << 3);
} else {
resp_hdr->status = 0;
}
list_hdr->aelver = 0;
list_hdr->aeolhl = sizeof(*list_hdr);
list_hdr->numaeo = 0;
__u32 aeoltl = list_hdr->aeolhl;
struct nvme_mi_aem_occ_data *data =
(struct nvme_mi_aem_occ_data *)(list_hdr+1);
for (int i = 0; i < 255; i++) {
if (fn_data->events[i] && to_send->enabled[i]) {
struct nvme_mi_event *event = fn_data->events[i];
list_hdr->numaeo++;
aeoltl += sizeof(struct nvme_mi_aem_occ_data);
aeoltl += event->spec_info_len +
event->vend_spec_info_len;
data->aelhlen = sizeof(*data);
if ((fn_data->fc == AEM_FC_BAD_OCC_RSP_HDR_LEN_SYNC && !aem_not_ack) ||
(fn_data->fc == AEM_FC_BAD_OCC_RSP_HDR_LEN_AEM && aem_not_ack))
data->aelhlen--;
data->aeoui.aeocidi = event->aeocidi;
data->aeoui.aeoi = event->aeoi;
data->aeoui.aessi = event->aessi;
data->aeosil = event->spec_info_len;
data->aeovsil = event->vend_spec_info_len;
if ((fn_data->fc == AEM_FC_BAD_OCC_RSP_TOTAL_LEN_SYNC &&
!aem_not_ack) ||
(fn_data->fc == AEM_FC_BAD_OCC_RSP_TOTAL_LEN_AEM &&
aem_not_ack))
aeoltl -= 1;
//Now the data
uint8_t *spec = (uint8_t *)(data+1);
if (data->aeosil) {
memcpy(spec, event->spec_info, event->spec_info_len);
spec += event->spec_info_len;
}
if (data->aeovsil) {
memcpy(spec, event->vend_spec_info, event->vend_spec_info_len);
spec += event->vend_spec_info_len;
}
data = (struct nvme_mi_aem_occ_data *)(spec);
}
}
nvme_mi_aem_aeolli_set_aeoltl(list_hdr, aeoltl);
test_peer.tx_buf_len = hdr_len + aeoltl;
if ((fn_data->fc == AEM_FC_BAD_OCC_RSP_BUFFER_LEN_SYNC && !aem_not_ack) ||
(fn_data->fc == AEM_FC_BAD_OCC_RSP_BUFFER_LEN_AEM && aem_not_ack))
test_peer.tx_buf_len--;
test_set_tx_mic(&test_peer);
}
static void check_aem_sync_message(struct nvme_mi_aem_enabled_map *expected_mask,
struct nvme_mi_aem_enabled_map *expected_state,
struct aem_rcv_enable_fn_data *fn_data)
{
//Check the RX buffer for the endpoint. We should be getting a CONFIG SET AEM
//with all enabled items disabled
struct nvme_mi_mi_req_hdr *req =
(struct nvme_mi_mi_req_hdr *)test_peer.rx_buf;
struct nvme_mi_aem_supported_list *list =
(struct nvme_mi_aem_supported_list *)(req+1);
assert(req->opcode == nvme_mi_mi_opcode_configuration_set);
assert((le32_to_cpu(req->cdw0) & 0xFF) == NVME_MI_CONFIG_AE);
assert(list->hdr.aeslver == 0);
int count = 0;
//Count how many events are set in the expected mask
for (int i = 0; i < 256; i++) {
if (expected_mask->enabled[i])
count++;
}
assert(list->hdr.numaes == count);
assert(list->hdr.aeslhl == sizeof(struct nvme_mi_aem_supported_list));
assert(list->hdr.aest == list->hdr.aeslhl +
count * sizeof(struct nvme_mi_aem_supported_item));
struct nvme_mi_aem_supported_item *item =
(struct nvme_mi_aem_supported_item *)(list+1);
//Check the items
for (int i = 0; i < 256; i++) {
if (expected_mask->enabled[i]) {
bool found = false;
for (int j = 0; j < count; j++) {
if (nvme_mi_aem_aesi_get_aesid(item[j].aesi) == i &&
nvme_mi_aem_aesi_get_aese(item[j].aesi) ==
expected_state->enabled[i]) {
found = true;
break;
}
}
assert(found);
}
}
}
static int aem_rcv_enable_fn(struct test_peer *peer, void *buf, size_t len, int sd)
{
struct aem_rcv_enable_fn_data *fn_data = peer->tx_data;
struct nvme_mi_mi_resp_hdr *tx_hdr = (struct nvme_mi_mi_resp_hdr *)peer->tx_buf;
/* set up a few default response fields; caller may have
* initialised the rest of the response
*/
test_peer.tx_buf[0] = NVME_MI_MSGTYPE_NVME;
test_peer.tx_buf[1] = test_peer.rx_buf[1] | (NVME_MI_ROR_RSP << 7);
tx_hdr->status = 0;
switch (fn_data->state) {
case AEM_ES_GET_ENABLED:
{
assert(sd == peer->sd[TEST_PEER_SD_COMMANDS_IDX]);
//First, we want to return some data about what is already enabled
struct nvme_mi_aem_supported_list_header *list_hdr =
(struct nvme_mi_aem_supported_list_header *)(tx_hdr+1);
if (fn_data->fc == AEM_FC_BAD_GET_CONFIG_HEADER_LEN)
list_hdr->aeslhl =
sizeof(struct nvme_mi_aem_supported_list_header) - 1;
else
list_hdr->aeslhl =
sizeof(struct nvme_mi_aem_supported_list_header);
list_hdr->aeslver = 0;
struct nvme_mi_aem_supported_item *item =
(struct nvme_mi_aem_supported_item *)(list_hdr+1);
int item_count = 0;
list_hdr->numaes = 0;
//Count how many events are enabled on the endpoint
for (int i = 0; i < 256; i++) {
if (fn_data->ep_enabled_map.enabled[i]) {
list_hdr->numaes++;
nvme_mi_aem_aesi_set_aesid(&item[item_count], i);
nvme_mi_aem_aesi_set_aee(&item[item_count], 1);
item[item_count].aesl =
sizeof(struct nvme_mi_aem_supported_item);
item_count++;
}
}
list_hdr->aest = list_hdr->aeslhl +
list_hdr->numaes * sizeof(struct nvme_mi_aem_supported_item);
if (fn_data->fc == AEM_FC_BAD_GET_CONFIG_TOTAL_LEN)
list_hdr->aest--;//Shrink
test_peer.tx_buf_len =
sizeof(struct nvme_mi_mi_resp_hdr) + list_hdr->aest;
if (fn_data->fc == AEM_FC_BAD_GET_CONFIG_BUFFER_LEN)
test_peer.tx_buf_len--;
test_set_tx_mic(&test_peer);
fn_data->state = AEM_ES_SET_TO_DISABLED;
break;
}
case AEM_ES_SET_TO_DISABLED:
{
assert(sd == peer->sd[TEST_PEER_SD_COMMANDS_IDX]);
struct nvme_mi_aem_enabled_map expected = {false};
//The items in the ep_enabled_map should get disabled
check_aem_sync_message(&fn_data->ep_enabled_map, &expected, fn_data);
//Need to queue a reasonable response with no OCC
struct nvme_mi_mi_resp_hdr *tx_hdr =
(struct nvme_mi_mi_resp_hdr *)test_peer.tx_buf;
struct nvme_mi_aem_occ_list_hdr *list_hdr =
(struct nvme_mi_aem_occ_list_hdr *)(tx_hdr+1);
list_hdr->aelver = 0;
list_hdr->aeolhl = sizeof(*list_hdr);
list_hdr->numaeo = 0;
nvme_mi_aem_aeolli_set_aeoltl(list_hdr, list_hdr->aeolhl);
test_peer.tx_buf_len = sizeof(struct nvme_mi_mi_resp_hdr) +
nvme_mi_aem_aeolli_get_aeoltl(list_hdr->aeolli);
test_set_tx_mic(&test_peer);
fn_data->state = AEM_ES_ENABLE_SET_ENABLED;
break;
}
case AEM_ES_ENABLE_SET_ENABLED:
assert(sd == peer->sd[TEST_PEER_SD_COMMANDS_IDX]);
//We should verify the right things are enabled
//The items in the host enable map should get enabled
check_aem_sync_message(&fn_data->host_enabled_map,
&fn_data->host_enabled_map, fn_data);
//Prepare an OCC list response
populate_tx_occ_list(false, fn_data, &fn_data->host_enabled_map);
fn_data->state = AEM_ES_PROCESS;
break;
case AEM_ES_PROCESS:
//This case is actually a TX without any request from the host
assert(sd == peer->sd[TEST_PEER_SD_AEMS_IDX]);
//Prepare an OCC list response
populate_tx_occ_list(true, fn_data, &fn_data->aem_during_process_map);
fn_data->state = AEM_ES_ACK_RESPONSE;
break;
case AEM_ES_ACK_RESPONSE:
assert(sd == peer->sd[TEST_PEER_SD_COMMANDS_IDX]);
//Prepare an OCC list response
populate_tx_occ_list(false, fn_data, &fn_data->ack_events_map);
fn_data->state = AEM_ES_ACK_RECEIVED;
break;
default:
assert(false);//Not expected
}
return 0;
}
enum nvme_mi_aem_handler_next_action aem_handler(nvme_mi_ep_t ep, size_t num_events, void *userdata)
{
struct aem_rcv_enable_fn_data *fn_data = userdata;
fn_data->callback_count++;
switch (fn_data->state) {
case AEM_ES_PROCESS:
case AEM_ES_ACK_RESPONSE:
case AEM_ES_ACK_RECEIVED:
{
//This means we just sent our first OCC data
int item_count = 0;
struct nvme_mi_aem_enabled_map *map;
//Count how many events are enabled in the map for this state
switch (fn_data->state) {
case AEM_ES_PROCESS:
map = &fn_data->host_enabled_map;
break;
case AEM_ES_ACK_RESPONSE:
map = &fn_data->aem_during_process_map;
break;
case AEM_ES_ACK_RECEIVED:
map = &fn_data->ack_events_map;
break;
default:
assert(false);
}
for (int i = 0; i < 256; i++)
if (map->enabled[i])
item_count++;
assert(num_events == item_count);
for (int i = 0; i < num_events; i++) {
struct nvme_mi_event *e = nvme_mi_aem_get_next_event(ep);
uint8_t idx = e->aeoi;
assert(fn_data->events[idx]);
assert(fn_data->host_enabled_map.enabled[idx]);
assert(fn_data->events[idx]->aeocidi == e->aeocidi);
assert(fn_data->events[idx]->aessi == e->aessi);
assert(fn_data->events[idx]->spec_info_len ==
e->spec_info_len);
assert(memcmp(fn_data->events[idx]->spec_info,
e->spec_info, e->spec_info_len) == 0);
assert(fn_data->events[idx]->vend_spec_info_len ==
e->vend_spec_info_len);
assert(memcmp(fn_data->events[idx]->vend_spec_info,
e->vend_spec_info, e->vend_spec_info_len) == 0);
}
assert(nvme_mi_aem_get_next_event(ep) == NULL);
break;
}
default:
assert(false);
}
return NVME_MI_AEM_HNA_ACK;
}
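For comparison, a production handler can be much simpler than the test harness above; a minimal sketch using the same callback contract (printf assumes <stdio.h> is included):

/* Minimal sketch of a user handler: drain the queued events, then ask
 * the library to acknowledge them. */
static enum nvme_mi_aem_handler_next_action
simple_aem_handler(nvme_mi_ep_t ep, size_t num_events, void *userdata)
{
	for (size_t i = 0; i < num_events; i++) {
		struct nvme_mi_event *e = nvme_mi_aem_get_next_event(ep);

		if (e)
			printf("AEM event id %u\n", e->aeoi);
	}
	return NVME_MI_AEM_HNA_ACK;
}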
static void aem_test_aem_api_helper(nvme_mi_ep_t ep,
struct nvme_mi_aem_config *config, int expected_event_count)
{
struct aem_rcv_enable_fn_data *fn_data =
(struct aem_rcv_enable_fn_data *)test_peer.tx_data;
int rc = 0;
test_peer.tx_fn = aem_rcv_enable_fn;
//This should not work outside the handler
assert(nvme_mi_aem_get_next_event(ep) == NULL);
rc = nvme_mi_aem_enable(ep, config, test_peer.tx_data);
assert(rc == 0);
//This should not work outside the handler
assert(nvme_mi_aem_get_next_event(ep) == NULL);
rc = nvme_mi_aem_process(ep, test_peer.tx_data);
assert(rc == 0);
//e.g. one for the initial enable sync and one for the AEM; ack events add more
assert(fn_data->callback_count == expected_event_count);
//This should not work outside the handler
assert(nvme_mi_aem_get_next_event(ep) == NULL);
}
static void aem_test_aem_disable_helper(nvme_mi_ep_t ep,
struct aem_rcv_enable_fn_data *fn_data)
{
memcpy(&fn_data->ep_enabled_map, &fn_data->host_enabled_map,
sizeof(fn_data->host_enabled_map));
fn_data->state = AEM_ES_GET_ENABLED;//This is the flow for disabling
assert(nvme_mi_aem_disable(ep) == 0);
}
static void test_mi_aem_ep_based_failure_helper(nvme_mi_ep_t ep,
enum aem_failure_condition fc, struct test_peer *peer)
{
struct aem_rcv_enable_fn_data fn_data = {0};
struct nvme_mi_aem_config config = {0};
config.aemd = 1;
config.aerd = 2;
config.enabled_map.enabled[3] = true;
fn_data.aem_during_process_map.enabled[3] = true;
struct nvme_mi_event e = {0};
e.aeoi = 3;
e.spec_info_len = 0;
fn_data.events[3] = &e;
memcpy(&fn_data.host_enabled_map, &config.enabled_map, sizeof(config.enabled_map));
config.aem_handler = aem_handler;
peer->tx_data = (void *) &fn_data;
peer->tx_fn = aem_rcv_enable_fn;
fn_data.fc = fc;
switch (fc) {
case AEM_FC_BAD_GET_CONFIG_HEADER_LEN:
case AEM_FC_BAD_GET_CONFIG_TOTAL_LEN:
case AEM_FC_BAD_GET_CONFIG_BUFFER_LEN:
case AEM_FC_BAD_OCC_RSP_HDR_LEN_SYNC:
case AEM_FC_BAD_OCC_RSP_TOTAL_LEN_SYNC:
case AEM_FC_BAD_OCC_RSP_BUFFER_LEN_SYNC:
//These all should fail before processing
assert(nvme_mi_aem_enable(ep, &config, &fn_data) == -1);
assert(errno == EPROTO);
break;
case AEM_FC_BAD_OCC_RSP_HDR_LEN_AEM:
case AEM_FC_BAD_OCC_RSP_TOTAL_LEN_AEM:
case AEM_FC_BAD_OCC_RSP_BUFFER_LEN_AEM:
//These should fail on the processing
assert(nvme_mi_aem_enable(ep, &config, &fn_data) == 0);
assert(nvme_mi_aem_process(ep, &fn_data) == -1);
assert(errno == EPROTO);
break;
default:
assert(false);//Unexpected
}
}
/* test: Check validation of endpoint messages in various stages of aem handling */
static void test_mi_aem_ep_based_failure_conditions(nvme_mi_ep_t ep, struct test_peer *peer)
{
test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_GET_CONFIG_HEADER_LEN, peer);
test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_GET_CONFIG_TOTAL_LEN, peer);
test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_GET_CONFIG_BUFFER_LEN, peer);
test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_OCC_RSP_HDR_LEN_SYNC, peer);
test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_OCC_RSP_HDR_LEN_AEM, peer);
test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_OCC_RSP_TOTAL_LEN_SYNC, peer);
test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_OCC_RSP_TOTAL_LEN_AEM, peer);
test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_OCC_RSP_BUFFER_LEN_SYNC, peer);
test_mi_aem_ep_based_failure_helper(ep, AEM_FC_BAD_OCC_RSP_BUFFER_LEN_AEM, peer);
}
/* test: Check aem enable logic when API used improperly */
static void test_mi_aem_enable_invalid_usage(nvme_mi_ep_t ep, struct test_peer *peer)
{
struct nvme_mi_aem_config config = {0};
config.aem_handler = aem_handler;
config.enabled_map.enabled[0] = false;
config.aemd = 1;
config.aerd = 2;
//Call with invalid config due to nothing enabled
assert(nvme_mi_aem_enable(ep, &config, NULL) == -1);
config.aem_handler = NULL;
config.enabled_map.enabled[0] = true;
//Call with invalid config due to no callback
assert(nvme_mi_aem_enable(ep, &config, NULL) == -1);
//Call with invalid config due to being NULL
assert(nvme_mi_aem_enable(ep, NULL, NULL) == -1);
config.aem_handler = aem_handler;
config.enabled_map.enabled[0] = true;
//Call with invalid endpoint
assert(nvme_mi_aem_enable(NULL, &config, NULL) == -1);
}
/* test: Check aem process logic when API used improperly */
static void test_mi_aem_process_invalid_usage(nvme_mi_ep_t ep, struct test_peer *peer)
{
//Without calling enable first
assert(nvme_mi_aem_process(ep, NULL) == -1);
//Call with invalid ep
assert(nvme_mi_aem_process(NULL, NULL) == -1);
}
/* test: Check aem disable logic when API used improperly */
static void test_mi_aem_disable_invalid_usage(nvme_mi_ep_t ep, struct test_peer *peer)
{
assert(nvme_mi_aem_disable(NULL) == -1);
}
static void test_mi_aem_get_enabled_invalid_usage(nvme_mi_ep_t ep, struct test_peer *peer)
{
struct nvme_mi_aem_enabled_map map;
assert(nvme_mi_aem_get_enabled(ep, NULL) == -1);
assert(nvme_mi_aem_get_enabled(NULL, &map) == -1);
}
/* test: Check aem get enabled logic */
static void test_mi_aem_get_enabled(nvme_mi_ep_t ep, struct test_peer *peer)
{
//Report some events as enabled on the Endpoint
struct aem_rcv_enable_fn_data fn_data = {0};
struct nvme_mi_aem_enabled_map map;
test_peer.tx_fn = aem_rcv_enable_fn;
peer->tx_data = (void *) &fn_data;
fn_data.ep_enabled_map.enabled[8] = true;
fn_data.ep_enabled_map.enabled[20] = true;
fn_data.ep_enabled_map.enabled[51] = true;
fn_data.ep_enabled_map.enabled[255] = true;
assert(nvme_mi_aem_get_enabled(ep, &map) == 0);
assert(memcmp(&fn_data.ep_enabled_map, &map, sizeof(map)) == 0);
}
/* test: Check aem disable logic when called without an enable */
static void test_mi_aem_disable_no_enable(nvme_mi_ep_t ep, struct test_peer *peer)
{
//When no events enabled on Endpoint
struct aem_rcv_enable_fn_data fn_data = {0};
test_peer.tx_fn = aem_rcv_enable_fn;
peer->tx_data = (void *) &fn_data;
aem_test_aem_disable_helper(ep, &fn_data);
//When some events enabled on Endpoint
fn_data.ep_enabled_map.enabled[45] = true;
aem_test_aem_disable_helper(ep, &fn_data);
}
/* test: Check aem enable logic with ack carrying events */
static void test_mi_aem_api_w_ack_events(nvme_mi_ep_t ep, struct test_peer *peer)
{
struct aem_rcv_enable_fn_data fn_data = {0};
struct nvme_mi_aem_config config = {0};
config.aemd = 1;
config.aerd = 2;
peer->tx_data = (void *) &fn_data;
config.aem_handler = aem_handler;
config.enabled_map.enabled[5] = true;
config.enabled_map.enabled[15] = true;
fn_data.aem_during_process_map.enabled[5] = true;
//ack_events_map carries event 15 in this test
fn_data.ack_events_map.enabled[15] = true;
//The EP has nothing enabled at the start (ep_enabled_map stays zeroed)
struct nvme_mi_event ev5 = {0};
ev5.aeoi = 5;
ev5.aeocidi = 2;
ev5.aessi = 3;
struct nvme_mi_event ev15 = {0};
uint8_t ev15_spec[] = { 45, 15};
ev15.aeoi = 15;
ev15.aeocidi = 60213;
ev15.aessi = 200;
ev15.spec_info = ev15_spec;
ev15.spec_info_len = sizeof(ev15_spec);
fn_data.events[5] = &ev5;
fn_data.events[15] = &ev15;
memcpy(&fn_data.host_enabled_map, &config.enabled_map, sizeof(config.enabled_map));
aem_test_aem_api_helper(ep, &config, 3);
aem_test_aem_disable_helper(ep, &fn_data);
}
/* test: Check aem enable logic */
static void test_mi_aem_api_simple(nvme_mi_ep_t ep, struct test_peer *peer)
{
struct aem_rcv_enable_fn_data fn_data = {0};
struct nvme_mi_aem_config config = {0};
config.aemd = 1;
config.aerd = 2;
peer->tx_data = (void *) &fn_data;
config.aem_handler = aem_handler;
config.enabled_map.enabled[1] = true;
config.enabled_map.enabled[3] = true;
config.enabled_map.enabled[16] = true;
fn_data.aem_during_process_map.enabled[3] = true;
//No ack_events_map will be enabled in this test
fn_data.ep_enabled_map.enabled[3] = true;
fn_data.ep_enabled_map.enabled[20] = true;
fn_data.ep_enabled_map.enabled[200] = true;
struct nvme_mi_event ev1 = {0};
uint8_t ev1_spec[] = { 98, 56, 32, 12};
ev1.aeoi = 1;
ev1.aeocidi = 2;
ev1.aessi = 3;
ev1.spec_info = ev1_spec;
ev1.spec_info_len = sizeof(ev1_spec);
struct nvme_mi_event ev3 = {0};
uint8_t ev3_spec[] = { 45, 15};
ev3.aeoi = 3;
ev3.aeocidi = 4;
ev3.aessi = 5;
ev3.spec_info = ev3_spec;
ev3.spec_info_len = sizeof(ev3_spec);
struct nvme_mi_event ev16 = {0};
ev16.aeoi = 16;
ev16.aeocidi = 6;
ev16.aessi = 7;
fn_data.events[1] = &ev1;
fn_data.events[3] = &ev3;
fn_data.events[16] = &ev16;
memcpy(&fn_data.host_enabled_map, &config.enabled_map, sizeof(config.enabled_map));
aem_test_aem_api_helper(ep, &config, 2);
aem_test_aem_disable_helper(ep, &fn_data);
}
#define DEFINE_TEST(name) { #name, test_ ## name }
struct test {
const char *name;
@@ -737,6 +1414,15 @@ struct test {
DEFINE_TEST(mpr_timeouts),
DEFINE_TEST(mpr_timeout_clamp),
DEFINE_TEST(mpr_mprt_zero),
+DEFINE_TEST(mi_aem_api_simple),
+DEFINE_TEST(mi_aem_api_w_ack_events),
+DEFINE_TEST(mi_aem_disable_no_enable),
+DEFINE_TEST(mi_aem_process_invalid_usage),
+DEFINE_TEST(mi_aem_enable_invalid_usage),
+DEFINE_TEST(mi_aem_disable_invalid_usage),
+DEFINE_TEST(mi_aem_get_enabled),
+DEFINE_TEST(mi_aem_get_enabled_invalid_usage),
+DEFINE_TEST(mi_aem_ep_based_failure_conditions),
};
static void run_test(struct test *test, FILE *logfd, nvme_mi_ep_t ep,


@@ -95,6 +95,10 @@ static const struct nvme_mi_transport test_transport = {
.submit = test_transport_submit,
.close = test_transport_close,
.desc_ep = test_transport_desc_ep,
+//The following aren't actually used by the test_transport
+.aem_fd = NULL,
+.aem_purge = NULL,
+.aem_read = NULL,
};
static void test_set_transport_callback(nvme_mi_ep_t ep, test_submit_cb cb,
@@ -734,7 +738,7 @@ static void test_resp_invalid_type(nvme_mi_ep_t ep)
}
/* test: response with mis-matching command slot */
-static int test_resp_csi_cb(struct nvme_mi_ep *ep,
+static int test_resp_csi_invert_cb(struct nvme_mi_ep *ep,
struct nvme_mi_req *req,
struct nvme_mi_resp *resp,
void *data)
@@ -744,15 +748,54 @@ static int test_resp_csi_cb(struct nvme_mi_ep *ep,
return 0;
}
-static void test_resp_csi(nvme_mi_ep_t ep)
+/* test: validation of proper csi setting */
+static int test_resp_csi_check_cb(struct nvme_mi_ep *ep,
+struct nvme_mi_req *req,
+struct nvme_mi_resp *resp,
+void *data)
+{
+assert((req->hdr->nmp & 1) == (ep->csi & 1));
+return 0;
+}
+/* test: Ensure that csi bit is set properly in the request */
+static void test_resp_csi_request(nvme_mi_ep_t ep)
{
struct nvme_mi_read_nvm_ss_info ss_info;
int rc;
-test_set_transport_callback(ep, test_resp_csi_cb, NULL);
+test_set_transport_callback(ep, test_resp_csi_check_cb, NULL);
rc = nvme_mi_mi_read_mi_data_subsys(ep, &ss_info);
assert(rc != 0);
+nvme_mi_set_csi(ep, 1);//Change CSI
+rc = nvme_mi_mi_read_mi_data_subsys(ep, &ss_info);
+assert(rc != 0);
+nvme_mi_set_csi(ep, 0);//Change CSI
}
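The check in test_resp_csi_check_cb relies on the CSI being carried in bit 0 of the request's NMP byte, which nvme_mi_set_csi() flips between command slots. A hypothetical helper (not in the diff) makes the bit position explicit:

/* Hypothetical, illustration only: extract the command slot identifier
 * bit that test_resp_csi_check_cb compares against ep->csi. */
static unsigned int nmp_csi(const struct nvme_mi_msg_hdr *hdr)
{
	return hdr->nmp & 1;
}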
+/* test: Ensure that when the csi bit is set wrong in a response,
+ * it results in an error
+ */
+static void test_resp_csi_mismatch(nvme_mi_ep_t ep)
+{
+struct nvme_mi_read_nvm_ss_info ss_info;
+int rc;
+test_set_transport_callback(ep, test_resp_csi_invert_cb, NULL);
+rc = nvme_mi_mi_read_mi_data_subsys(ep, &ss_info);
+assert(rc != 0);
+nvme_mi_set_csi(ep, 1);//Change CSI
+rc = nvme_mi_mi_read_mi_data_subsys(ep, &ss_info);
+assert(rc != 0);
+nvme_mi_set_csi(ep, 0);//Change CSI
+}
/* test: config get MTU request & response layout, ensure we're handling
@@ -2062,7 +2105,8 @@ struct test {
DEFINE_TEST(resp_req),
DEFINE_TEST(resp_hdr_small),
DEFINE_TEST(resp_invalid_type),
-DEFINE_TEST(resp_csi),
+DEFINE_TEST(resp_csi_request),
+DEFINE_TEST(resp_csi_mismatch),
DEFINE_TEST(mi_config_get_mtu),
DEFINE_TEST(mi_config_set_freq),
DEFINE_TEST(mi_config_set_freq_invalid),


@@ -0,0 +1,6 @@
raw_nbft_size=315
host.id=78563412aaaabbbbcccc123456789012
host.nqn=nqn.2014-08.org.nvmexpress:uuid:12345678-aaaa-bbbb-cccc-123456789012
host.host_id_configured=1
host.host_nqn_configured=1
host.primary=0


@@ -0,0 +1,45 @@
raw_nbft_size=785
host.id=78563412aaaabbbbcccc123456789012
host.nqn=nqn.2014-08.org.nvmexpress:uuid:12345678-aaaa-bbbb-cccc-123456789012
host.host_id_configured=1
host.host_nqn_configured=1
host.primary=0
hfi_list[0]->index=1
hfi_list[0]->transport=tcp
hfi_list[0]->tcp_info.pci_sbdf=3329
hfi_list[0]->tcp_info.mac_addr=40a6b7c08ac9
hfi_list[0]->tcp_info.vlan=0
hfi_list[0]->tcp_info.ip_origin=3
hfi_list[0]->tcp_info.ipaddr=::
hfi_list[0]->tcp_info.subnet_mask_prefix=0
hfi_list[0]->tcp_info.gateway_ipaddr=::
hfi_list[0]->tcp_info.route_metric=500
hfi_list[0]->tcp_info.primary_dns_ipaddr=::
hfi_list[0]->tcp_info.secondary_dns_ipaddr=::
hfi_list[0]->tcp_info.dhcp_server_ipaddr=::
hfi_list[0]->tcp_info.host_name=(null)
hfi_list[0]->tcp_info.this_hfi_is_default_route=1
hfi_list[0]->tcp_info.dhcp_override=1
discovery_list[0]->index=1
discovery_list[0]->hfi->index=1
discovery_list[0]->uri=nvme+tcp://[4321:BBBB::1]:4420/
discovery_list[0]->nqn=nqn.2014-08.org.nvmexpress.discovery
subsystem_ns_list[0]->index=1
subsystem_ns_list[0]->discovery->index=1
subsystem_ns_list[0]->num_hfis=1
subsystem_ns_list[0]->hfis[0]->index=1
subsystem_ns_list[0]->transport=tcp
subsystem_ns_list[0]->traddr=4321:bbbb::1
subsystem_ns_list[0]->trsvcid=4420
subsystem_ns_list[0]->subsys_port_id=0
subsystem_ns_list[0]->nsid=0
subsystem_ns_list[0]->nid_type=0
subsystem_ns_list[0]->nid=0000000000000000
subsystem_ns_list[0]->subsys_nqn=nqn.2014-08.org.nvmexpress.discovery
subsystem_ns_list[0]->pdu_header_digest_required=0
subsystem_ns_list[0]->data_digest_required=0
subsystem_ns_list[0]->controller_id=0
subsystem_ns_list[0]->asqsz=0
subsystem_ns_list[0]->dhcp_root_path_string=(null)
subsystem_ns_list[0]->discovered=1
subsystem_ns_list[0]->unavailable=1


@@ -21,7 +21,9 @@ tables = [
'NBFT-Dell.PowerEdge.R760',
'NBFT-Dell.PowerEdge.R660-fw1.5.5-single',
'NBFT-Dell.PowerEdge.R660-fw1.5.5-mpath+discovery',
-'NBFT-mpath+disc-ipv4+6_half'
+'NBFT-mpath+disc-ipv4+6_half',
+'NBFT-ipv6-noip+disc',
+'NBFT-empty'
]
tables_bad = [

BIN test/nbft/tables/NBFT-empty (new file)

Binary file not shown.

Binary file not shown.