Adding upstream version 4.2.
Signed-off-by: Daniel Baumann <daniel@debian.org>

parent 16732c81e5
commit 4fd4995b67
279 changed files with 77998 additions and 0 deletions

clustermd_tests/00r10_Create (new file, 50 lines)

#!/bin/bash

mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check $NODE1 resync
check $NODE2 PENDING
check all wait
check all raid10
check all bitmap
check all nosync
check all state UU
check all dmesg
stop_md all $md0

mdadm -CR $md0 -l10 -b clustered -n3 --layout n3 $dev0 $dev1 $dev2 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2
check all nosync
check all raid10
check all bitmap
check all state UUU
check all dmesg
stop_md all $md0

mdadm -CR $md0 -l10 -b clustered -n2 -x1 --layout n2 $dev0 $dev1 $dev2 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2
check all nosync
check all raid10
check all bitmap
check all spares 1
check all state UU
check all dmesg
stop_md all $md0

name=tstmd
mdadm -CR $md0 -l10 -b clustered -n2 $dev0 $dev1 --layout n2 --name=$name --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid10
check all bitmap
check all state UU
for ip in $NODE1 $NODE2
do
    ssh $ip "mdadm -D $md0 | grep 'Name' | grep -q $name"
    [ $? -ne '0' ] &&
        die "$ip: check --name=$name failed."
done
check all dmesg
stop_md all $md0

exit 0
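
All of the clustermd_tests cases above and below follow the same shape: create a clustered array on the master node, assemble the same member disks on the second node over ssh, assert the expected state with the check helper, and tear the array down with stop_md. As a condensed sketch of that skeleton (using only commands and helpers that appear in these scripts; $md0, $dev0, $dev1, $NODE1, $NODE2, check and stop_md come from cluster_conf and func.sh shown later):

    #!/bin/bash
    # create a clustered RAID1 on the master node (tests run here)
    mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1
    # assemble the same member disks on the second node
    ssh $NODE2 mdadm -A $md0 $dev0 $dev1
    # assert per-node and cluster-wide state, then stop the array on both nodes
    check $NODE1 resync
    check all wait
    check all state UU
    check all dmesg
    stop_md all $md0

    exit 0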

clustermd_tests/00r1_Create (new file, 50 lines)

#!/bin/bash

mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check $NODE1 resync
check $NODE2 PENDING
check all wait
check all raid1
check all bitmap
check all nosync
check all state UU
check all dmesg
stop_md all $md0

mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid1
check all bitmap
check all state UU
check all dmesg
stop_md all $md0

mdadm -CR $md0 -l1 -b clustered -n2 -x1 $dev0 $dev1 $dev2 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2
check all nosync
check all raid1
check all bitmap
check all spares 1
check all state UU
check all dmesg
stop_md all $md0

name=tstmd
mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --name=$name --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid1
check all bitmap
check all state UU
for ip in $NODE1 $NODE2
do
    ssh $ip "mdadm -D $md0 | grep 'Name' | grep -q $name"
    [ $? -ne '0' ] &&
        die "$ip: check --name=$name failed."
done
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/01r10_Grow_bitmap-switch (new file, 51 lines)

#!/bin/bash

mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid10
check all bitmap
check all state UU

# switch 'clustered' bitmap to 'none', and then 'none' to 'internal'
stop_md $NODE2 $md0
mdadm --grow $md0 --bitmap=none
[ $? -eq '0' ] ||
    die "$NODE1: change bitmap 'clustered' to 'none' failed."
mdadm -X $dev0 $dev1 &> /dev/null
[ $? -eq '0' ] &&
    die "$NODE1: bitmap still exists in member_disks."
check all nobitmap
mdadm --grow $md0 --bitmap=internal
[ $? -eq '0' ] ||
    die "$NODE1: change bitmap 'none' to 'internal' failed."
sleep 1
mdadm -X $dev0 $dev1 &> /dev/null
[ $? -eq '0' ] ||
    die "$NODE1: create 'internal' bitmap failed."
check $NODE1 bitmap

# switch 'internal' bitmap to 'none', and then 'none' to 'clustered'
mdadm --grow $md0 --bitmap=none
[ $? -eq '0' ] ||
    die "$NODE1: change bitmap 'internal' to 'none' failed."
mdadm -X $dev0 $dev1 &> /dev/null
[ $? -eq '0' ] &&
    die "$NODE1: bitmap still exists in member_disks."
check $NODE1 nobitmap
mdadm --grow $md0 --bitmap=clustered
[ $? -eq '0' ] ||
    die "$NODE1: change bitmap 'none' to 'clustered' failed."
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
sleep 1
for ip in $NODES
do
    ssh $ip "mdadm -X $dev0 $dev1 | grep -q 'Cluster name'" ||
        die "$ip: create 'clustered' bitmap failed."
done
check all bitmap
check all state UU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/01r10_Grow_resize (new file, 38 lines)

#!/bin/bash

size=20000

mdadm -CR $md0 -l10 -b clustered --layout n2 --size $size --chunk=64 -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid10
check all bitmap
check all state UU

mdadm --grow $md0 --size max
check $NODE1 resync
check $NODE1 wait
check all state UU

mdadm --grow $md0 --size $size
check all nosync
check all state UU
check all dmesg
stop_md all $md0

mdadm -CR $md0 -l10 -b clustered --layout n2 --chunk=64 -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid10
check all bitmap
check all state UU

mdadm --grow $md0 --chunk=128
check $NODE1 reshape
check $NODE1 wait
check all chunk 128
check all state UU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/01r1_Grow_add (new file, 68 lines)

#!/bin/bash

mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid1
check all bitmap
check all state UU
check all dmesg
mdadm --grow $md0 --raid-devices=3 --add $dev2
sleep 0.3
grep recovery /proc/mdstat
if [ $? -eq '0' ]
then
    check $NODE1 wait
else
    check $NODE2 recovery
    check $NODE2 wait
fi
check all state UUU
check all dmesg
stop_md all $md0

mdadm -CR $md0 -l1 -b clustered -n2 -x1 $dev0 $dev1 $dev2 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2
check all nosync
check all raid1
check all bitmap
check all spares 1
check all state UU
check all dmesg
mdadm --grow $md0 --raid-devices=3 --add $dev3
sleep 0.3
grep recovery /proc/mdstat
if [ $? -eq '0' ]
then
    check $NODE1 wait
else
    check $NODE2 recovery
    check $NODE2 wait
fi
check all state UUU
check all dmesg
stop_md all $md0

mdadm -CR $md0 -l1 -b clustered -n2 -x1 $dev0 $dev1 $dev2 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2
check all nosync
check all raid1
check all bitmap
check all spares 1
check all state UU
check all dmesg
mdadm --grow $md0 --raid-devices=3
sleep 0.3
grep recovery /proc/mdstat
if [ $? -eq '0' ]
then
    check $NODE1 wait
else
    check $NODE2 recovery
    check $NODE2 wait
fi
check all state UUU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/01r1_Grow_bitmap-switch (new file, 51 lines)

#!/bin/bash

mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid1
check all bitmap
check all state UU

# switch 'clustered' bitmap to 'none', and then 'none' to 'internal'
stop_md $NODE2 $md0
mdadm --grow $md0 --bitmap=none
[ $? -eq '0' ] ||
    die "$NODE1: change bitmap 'clustered' to 'none' failed."
mdadm -X $dev0 $dev1 &> /dev/null
[ $? -eq '0' ] &&
    die "$NODE1: bitmap still exists in member_disks."
check all nobitmap
mdadm --grow $md0 --bitmap=internal
[ $? -eq '0' ] ||
    die "$NODE1: change bitmap 'none' to 'internal' failed."
sleep 2
mdadm -X $dev0 $dev1 &> /dev/null
[ $? -eq '0' ] ||
    die "$NODE1: create 'internal' bitmap failed."
check $NODE1 bitmap

# switch 'internal' bitmap to 'none', and then 'none' to 'clustered'
mdadm --grow $md0 --bitmap=none
[ $? -eq '0' ] ||
    die "$NODE1: change bitmap 'internal' to 'none' failed."
mdadm -X $dev0 $dev1 &> /dev/null
[ $? -eq '0' ] &&
    die "$NODE1: bitmap still exists in member_disks."
check $NODE1 nobitmap
mdadm --grow $md0 --bitmap=clustered
[ $? -eq '0' ] ||
    die "$NODE1: change bitmap 'none' to 'clustered' failed."
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
sleep 2
for ip in $NODES
do
    ssh $ip "mdadm -X $dev0 $dev1 | grep -q 'Cluster name'" ||
        die "$ip: create 'clustered' bitmap failed."
done
check all bitmap
check all state UU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/01r1_Grow_resize (new file, 23 lines)

#!/bin/bash

size=10000

mdadm -CR $md0 -l1 -b clustered --size $size -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid1
check all bitmap
check all state UU

mdadm --grow $md0 --size max
check $NODE1 resync
check $NODE1 wait
check all state UU

mdadm --grow $md0 --size $size
check all nosync
check all state UU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/02r10_Manage_add (new file, 33 lines)

#!/bin/bash

mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid10
check all bitmap
check all state UU
check all dmesg
mdadm --manage $md0 --fail $dev0 --remove $dev0
mdadm --zero $dev2
mdadm --manage $md0 --add $dev2
sleep 0.3
check $NODE1 recovery
check $NODE1 wait
check all state UU
check all dmesg
stop_md all $md0

mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid10
check all bitmap
check all state UU
check all dmesg
mdadm --manage $md0 --add $dev2
check all spares 1
check all state UU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/02r10_Manage_add-spare (new file, 30 lines)

#!/bin/bash

mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid10
check all bitmap
check all state UU
check all dmesg
mdadm --manage $md0 --add-spare $dev2
check all spares 1
check all state UU
check all dmesg
stop_md all $md0

mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 -x1 $dev0 $dev1 $dev2 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2
check all nosync
check all raid10
check all bitmap
check all spares 1
check all state UU
check all dmesg
mdadm --manage $md0 --add-spare $dev3
check all spares 2
check all state UU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/02r10_Manage_re-add (new file, 18 lines)

#!/bin/bash

mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid10
check all bitmap
check all state UU
check all dmesg
mdadm --manage $md0 --fail $dev0 --remove $dev0
mdadm --manage $md0 --re-add $dev0
check $NODE1 recovery
check all wait
check all state UU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/02r1_Manage_add (new file, 33 lines)

#!/bin/bash

mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid1
check all bitmap
check all state UU
check all dmesg
mdadm --manage $md0 --fail $dev0 --remove $dev0
mdadm --zero $dev2
mdadm --manage $md0 --add $dev2
sleep 0.3
check $NODE1 recovery
check $NODE1 wait
check all state UU
check all dmesg
stop_md all $md0

mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid1
check all bitmap
check all state UU
check all dmesg
mdadm --manage $md0 --add $dev2
check all spares 1
check all state UU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/02r1_Manage_add-spare (new file, 30 lines)

#!/bin/bash

mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid1
check all bitmap
check all state UU
check all dmesg
mdadm --manage $md0 --add-spare $dev2
check all spares 1
check all state UU
check all dmesg
stop_md all $md0

mdadm -CR $md0 -l1 -b clustered -n2 -x1 $dev0 $dev1 $dev2 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2
check all nosync
check all raid1
check all bitmap
check all spares 1
check all state UU
check all dmesg
mdadm --manage $md0 --add-spare $dev3
check all spares 2
check all state UU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/02r1_Manage_re-add (new file, 16 lines)

#!/bin/bash

mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid1
check all bitmap
check all state UU
check all dmesg
mdadm --manage $md0 --fail $dev0 --remove $dev0
mdadm --manage $md0 --re-add $dev0
check all state UU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/03r10_switch-recovery (new file, 21 lines)

#!/bin/bash

mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 -x1 $dev0 $dev1 $dev2 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2
check all nosync
check all raid10
check all bitmap
check all spares 1
check all state UU
check all dmesg
mdadm --manage $md0 --fail $dev0
sleep 0.2
check $NODE1 recovery
stop_md $NODE1 $md0
check $NODE2 recovery
check $NODE2 wait
check $NODE2 state UU
check all dmesg
stop_md $NODE2 $md0

exit 0

clustermd_tests/03r10_switch-resync (new file, 18 lines)

#!/bin/bash

mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check $NODE1 resync
check $NODE2 PENDING
stop_md $NODE1 $md0
check $NODE2 resync
check $NODE2 wait
mdadm -A $md0 $dev0 $dev1
check all raid10
check all bitmap
check all nosync
check all state UU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/03r1_switch-recovery (new file, 21 lines)

#!/bin/bash

mdadm -CR $md0 -l1 -b clustered -n2 -x1 $dev0 $dev1 $dev2 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2
check all nosync
check all raid1
check all bitmap
check all spares 1
check all state UU
check all dmesg
mdadm --manage $md0 --fail $dev0
sleep 0.3
check $NODE1 recovery
stop_md $NODE1 $md0
check $NODE2 recovery
check $NODE2 wait
check $NODE2 state UU
check all dmesg
stop_md $NODE2 $md0

exit 0

clustermd_tests/03r1_switch-resync (new file, 18 lines)

#!/bin/bash

mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check $NODE1 resync
check $NODE2 PENDING
stop_md $NODE1 $md0
check $NODE2 resync
check $NODE2 wait
mdadm -A $md0 $dev0 $dev1
check all raid1
check all bitmap
check all nosync
check all state UU
check all dmesg
stop_md all $md0

exit 0

clustermd_tests/cluster_conf (new file, 43 lines)

# Prerequisite:
# 1. The clustermd_tests/ cases only support testing a 2-node cluster. The cluster
#    requires the packages 'pacemaker+corosync+sbd+crmsh' (all available at
#    "https://github.com/ClusterLabs/"), and also requires a dlm resource running
#    on each node of the cluster.
#    For a quick HA-cluster setup on SUSE distributions, refer to chapters 6-8 of:
#    https://www.suse.com/documentation/sle-ha-12/install-quick/data/install-quick.html
#    For Red Hat distributions, refer to:
#    https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/high_availability_add-on_administration/index
# 2. Set up passwordless (key-based) ssh access between the nodes, so that
#    'ssh $node1 -l root ls' and 'ssh $node2 -l root ls' succeed from any node.
# 3. Fill in the node-ip section and the disks section below.

# Set node1 as the master node; the cluster-md cases run on this node,
# and node2 is the slave node.
# For example:
# NODE1=192.168.1.100 (testing runs here)
# NODE2=192.168.1.101
NODE1=
NODE2=

# Provide the devlist for clustermd testing. The two options below are alternatives:
# if option 1 is set, don't set option 2, and vice versa.
# 1. Use an iSCSI service to provide shared storage, then log in to the iSCSI target
#    given by ISCSI_TARGET_ID and ISCSI_TARGET_IP on the iSCSI clients, with commands like:
#    Execute on the iSCSI clients:
#    1) discover the iSCSI server:
#    # iscsiadm -m discovery -t st -p $ISCSI_TARGET_IP
#    2) log in and establish the connection:
#    # iscsiadm -m node -T $ISCSI_TARGET_ID -p $ISCSI_TARGET_IP -l
#    Note:
#    On the iSCSI server, all iSCSI LUNs must be created under one target_id; more than
#    6 LUNs/disks are recommended for testing, and each disk should be between 100M and 800M.
# 2. If all cluster nodes attach the same disks directly and the device names are
#    the same on all nodes, put them in 'devlist'.

# For example: (setting only $ISCSI_TARGET_ID is enough if iSCSI is already connected)
# ISCSI_TARGET_ID=iqn.2018-01.example.com:clustermd-testing
# ISCSI_TARGET_IP=192.168.1.102
ISCSI_TARGET_ID=

#devlist=/dev/sda /dev/sdb /dev/sdc /dev/sdd
devlist=
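
For reference, a filled-in cluster_conf could look like the sketch below; the addresses, target ID and device paths are illustrative values only (taken from the example comments above) and must be replaced with the details of your own cluster:

    NODE1=192.168.1.100
    NODE2=192.168.1.101
    # option 1: shared storage over iSCSI (leave devlist empty)
    ISCSI_TARGET_ID=iqn.2018-01.example.com:clustermd-testing
    # option 2: directly attached shared disks with identical names on both nodes
    #devlist=/dev/sda /dev/sdb /dev/sdc /dev/sdd /dev/sde /dev/sdf
    devlist=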

clustermd_tests/func.sh (new file, 332 lines)

#!/bin/bash

check_ssh()
{
    NODE1="$(grep '^NODE1' $CLUSTER_CONF | cut -d'=' -f2)"
    NODE2="$(grep '^NODE2' $CLUSTER_CONF | cut -d'=' -f2)"
    [ -z "$NODE1" -o -z "$NODE2" ] && {
        echo "Please provide node-ip in $CLUSTER_CONF."
        exit 1
    }
    for ip in $NODE1 $NODE2
    do
        ssh -o NumberOfPasswordPrompts=0 $ip -l root "pwd" > /dev/null
        [ $? -ne 0 ] && {
            echo "Please setup ssh-access with no-authorized mode."
            exit 1
        }
    done
}

fetch_devlist()
{
    ISCSI_ID="$(grep '^ISCSI_TARGET_ID' $CLUSTER_CONF | cut -d'=' -f2)"
    devlist="$(grep '^devlist' $CLUSTER_CONF | cut -d'=' -f2)"
    if [ ! -z "$ISCSI_ID" -a ! -z "$devlist" ]
    then
        echo "Set either ISCSI_TARGET_ID or devlist (not both) in $CLUSTER_CONF."
        exit 1
    elif [ ! -z "$ISCSI_ID" -a -z "$devlist" ]
    then
        for ip in $NODE1 $NODE2
        do
            ssh $ip "ls /dev/disk/by-path/*$ISCSI_ID*" > /dev/null
            [ $? -ne 0 ] && {
                echo "$ip: No disks found in '$ISCSI_ID' connection."
                exit 1
            }
        done
        devlist=($(ls /dev/disk/by-path/*$ISCSI_ID*))
    fi
    # sbd disks cannot be used in testing
    # Init devlist as an array
    i=''
    devlist=(${devlist[@]#$i})
    for i in ${devlist[@]}
    do
        sbd -d $i dump &> /dev/null
        [ $? -eq '0' ] && devlist=(${devlist[@]#$i})
    done
    for i in $(seq 0 ${#devlist[@]})
    do
        eval "dev$i=${devlist[$i]}"
    done
    [ "${#devlist[@]}" -lt 6 ] && {
        echo "Cluster-md testing requires at least 6 disks."
        exit 1
    }
}

check_dlm()
{
    if ! crm configure show | grep -q dlm
    then
        crm configure primitive dlm ocf:pacemaker:controld \
            op monitor interval=60 timeout=60 \
            meta target-role=Started &> /dev/null
        crm configure group base-group dlm
        crm configure clone base-clone base-group \
            meta interleave=true
    fi
    sleep 1
    for ip in $NODE1 $NODE2
    do
        ssh $ip "pgrep dlm_controld > /dev/null" || {
            echo "$ip: dlm_controld daemon doesn't exist."
            exit 1
        }
    done
    crm_mon -r -n1 | grep -iq "fail\|not" && {
        echo "Please clear cluster-resource errors."
        exit 1
    }
}

check_env()
{
    user=$(id -un)
    [ "X$user" = "Xroot" ] || {
        echo "testing can only be done as 'root'."
        exit 1
    }
    [ \! -x $mdadm ] && {
        echo "test: please run 'make everything' before performing testing."
        exit 1
    }
    check_ssh
    commands=(mdadm iscsiadm bc modinfo dlm_controld
              udevadm crm crm_mon lsblk pgrep sbd)
    for ip in $NODE1 $NODE2
    do
        for cmd in ${commands[@]}
        do
            ssh $ip "which $cmd &> /dev/null" || {
                echo "$ip: $cmd, command not found!"
                exit 1
            }
        done
        mods=(raid1 raid10 md_mod dlm md-cluster)
        for mod in ${mods[@]}
        do
            ssh $ip "modinfo $mod > /dev/null" || {
                echo "$ip: $mod, module doesn't exist."
                exit 1
            }
        done
        ssh $ip "lsblk -a | grep -iq raid"
        [ $? -eq 0 ] && {
            echo "$ip: Please run the tests without any RAID arrays running."
            exit 1
        }
        ssh $ip "modprobe md_mod"
    done
    fetch_devlist
    check_dlm
    [ -d $logdir ] || mkdir -p $logdir
}

# $1/node, $2/optional
stop_md()
{
    if [ "$1" == "all" ]
    then
        NODES=($NODE1 $NODE2)
    elif [ "$1" == "$NODE1" -o "$1" == "$NODE2" ]
    then
        NODES=$1
    else
        die "$1: unknown parameter."
    fi
    if [ -z "$2" ]
    then
        for ip in ${NODES[@]}
        do
            ssh $ip mdadm -Ssq
        done
    else
        for ip in ${NODES[@]}
        do
            ssh $ip mdadm -S $2
        done
    fi
}
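
A quick usage reference for stop_md, as exercised by the test cases above:

    stop_md all $md0       # stop $md0 on both $NODE1 and $NODE2
    stop_md $NODE1 $md0    # stop $md0 on the master node only
    stop_md all            # no array given: run 'mdadm -Ssq' and stop every array on both nodes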

# $1/optional, it shows why to save log
save_log()
{
    status=$1
    logfile="$status""$_basename".log

    cat $targetdir/stderr >> $targetdir/log
    cp $targetdir/log $logdir/$_basename.log

    for ip in $NODE1 $NODE2
    do
        echo "##$ip: saving dmesg." >> $logdir/$logfile
        ssh $ip "dmesg -c" >> $logdir/$logfile
        echo "##$ip: saving proc mdstat." >> $logdir/$logfile
        ssh $ip "cat /proc/mdstat" >> $logdir/$logfile
        array=($(ssh $ip "mdadm -Ds | cut -d' ' -f2"))

        if [ ! -z "$array" -a ${#array[@]} -ge 1 ]
        then
            echo "##$ip: mdadm -D ${array[@]}" >> $logdir/$logfile
            ssh $ip "mdadm -D ${array[@]}" >> $logdir/$logfile
            md_disks=($(ssh $ip "mdadm -DY ${array[@]} | grep "/dev/" | cut -d'=' -f2"))
            cat /proc/mdstat | grep -q "bitmap"
            if [ $? -eq 0 ]
            then
                echo "##$ip: mdadm -X ${md_disks[@]}" >> $logdir/$logfile
                ssh $ip "mdadm -X ${md_disks[@]}" >> $logdir/$logfile
                echo "##$ip: mdadm -E ${md_disks[@]}" >> $logdir/$logfile
                ssh $ip "mdadm -E ${md_disks[@]}" >> $logdir/$logfile
            fi
        else
            echo "##$ip: no array assembled!" >> $logdir/$logfile
        fi
    done
    [ "$1" == "fail" ] &&
        echo "See $logdir/$_basename.log and $logdir/$logfile for details"
    stop_md all
}

do_setup()
{
    check_env
    ulimit -c unlimited
}

do_clean()
{
    for ip in $NODE1 $NODE2
    do
        ssh $ip "mdadm -Ssq; dmesg -c > /dev/null"
    done
    mdadm --zero ${devlist[@]} &> /dev/null
}

cleanup()
{
    check_ssh
    do_clean
}

# check: $1/cluster_node $2/feature $3/optional
check()
{
    NODES=()
    if [ "$1" == "all" ]
    then
        NODES=($NODE1 $NODE2)
    elif [ "$1" == "$NODE1" -o "$1" == "$NODE2" ]
    then
        NODES=$1
    else
        die "$1: unknown parameter."
    fi
    case $2 in
    spares )
        for ip in ${NODES[@]}
        do
            spares=$(ssh $ip "tr '] ' '\012\012' < /proc/mdstat | grep -c '(S)'")
            [ "$spares" -ne "$3" ] &&
                die "$ip: expected $3 spares, but found $spares"
        done
    ;;
    raid* )
        for ip in ${NODES[@]}
        do
            ssh $ip "grep -sq "$2" /proc/mdstat" ||
                die "$ip: check '$2' failed."
        done
    ;;
    PENDING | recovery | resync | reshape )
        cnt=5
        for ip in ${NODES[@]}
        do
            while ! ssh $ip "grep -sq '$2' /proc/mdstat"
            do
                if [ "$cnt" -gt '0' ]
                then
                    sleep 0.2
                    cnt=$[cnt-1]
                else
                    die "$ip: no '$2' happening!"
                fi
            done
        done
    ;;
    wait )
        local cnt=60
        for ip in ${NODES[@]}
        do
            p=$(ssh $ip "cat /proc/sys/dev/raid/speed_limit_max")
            ssh $ip "echo 200000 > /proc/sys/dev/raid/speed_limit_max"
            while ssh $ip "grep -Esq '(resync|recovery|reshape|check|repair)' /proc/mdstat"
            do
                if [ "$cnt" -gt '0' ]
                then
                    sleep 5
                    cnt=$[cnt-1]
                else
                    die "$ip: Check '$2' timeout over 300 seconds."
                fi
            done
            ssh $ip "echo $p > /proc/sys/dev/raid/speed_limit_max"
        done
    ;;
    bitmap )
        for ip in ${NODES[@]}
        do
            ssh $ip "grep -sq '$2' /proc/mdstat" ||
                die "$ip: no '$2' found in /proc/mdstat."
        done
    ;;
    nobitmap )
        for ip in ${NODES[@]}
        do
            ssh $ip "grep -sq 'bitmap' /proc/mdstat" &&
                die "$ip: 'bitmap' found in /proc/mdstat."
        done
    ;;
    chunk )
        for ip in ${NODES[@]}
        do
            chunk_size=`awk -F',' '/chunk/{print $2}' /proc/mdstat | awk -F'[a-z]' '{print $1}'`
            [ "$chunk_size" -ne "$3" ] &&
                die "$ip: chunksize should be $3, but it's $chunk_size"
        done
    ;;
    state )
        for ip in ${NODES[@]}
        do
            ssh $ip "grep -Esq 'blocks.*\[$3\]\$' /proc/mdstat" ||
                die "$ip: no '$3' found in /proc/mdstat."
        done
    ;;
    nosync )
        for ip in ${NODES[@]}
        do
            ssh $ip "grep -Eq '(resync|recovery)' /proc/mdstat" &&
                die "$ip: resync or recovery is happening!"
        done
    ;;
    readonly )
        for ip in ${NODES[@]}
        do
            ssh $ip "grep -sq "read-only" /proc/mdstat" ||
                die "$ip: check '$2' failed!"
        done
    ;;
    dmesg )
        for ip in ${NODES[@]}
        do
            ssh $ip "dmesg | grep -iq 'error\|call trace\|segfault'" &&
                die "$ip: check '$2' prints errors!"
        done
    ;;
    * )
        die "unknown parameter $2"
    ;;
    esac
}
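
check() is the assertion primitive used throughout the cases above: $1 selects which node(s) to query over ssh, $2 names the condition to verify (mostly read from /proc/mdstat), and $3 carries an expected value where needed. A few representative invocations, all taken from the test scripts in this commit:

    check $NODE1 resync     # resync must be in progress on the master node
    check $NODE2 PENDING    # the second node must see the resync as pending
    check all wait          # block until no resync/recovery/reshape/check/repair remains
    check all raid10        # /proc/mdstat must show the raid10 personality
    check all state UU      # both members must be active ([UU])
    check all spares 1      # exactly one spare, i.e. one '(S)' device, expected
    check all chunk 128     # chunk size must be 128
    check all dmesg         # fail if dmesg contains errors, call traces or segfaults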