Adding upstream version 4.2.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-14 05:52:19 +01:00
parent 16732c81e5
commit 4fd4995b67
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
279 changed files with 77998 additions and 0 deletions

25
tests/00linear Normal file

@ -0,0 +1,25 @@
# create a simple linear
mdadm -CR $md0 -l linear -n3 $dev0 $dev1 $dev2
check linear
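# testdev <dev> <cnt> <per-dev-size> <chunk> is a harness helper (arguments
# assumed from how it is used here): it checks the array holds cnt devices'
# worth of space, rounded to the given chunk.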
testdev $md0 3 $mdsize2_l 1
mdadm -S $md0
# now with version-0.90 superblock
mdadm -CR $md0 -e0.90 --level=linear -n4 $dev0 $dev1 $dev2 $dev3
check linear
testdev $md0 4 $mdsize0 1
mdadm -S $md0
# now with version-1.0 superblock
mdadm -CR $md0 -e1.0 --level=linear -n4 $dev0 $dev1 $dev2 $dev3
check linear
testdev $md0 4 $mdsize1 1
mdadm -S $md0
# now with no superblock
mdadm -B $md0 -l linear -n5 $dev0 $dev1 $dev2 $dev3 $dev4
check linear
testdev $md0 5 $size 64
mdadm -S $md0

29
tests/00multipath Normal file

@ -0,0 +1,29 @@
#
# create a multipath array, then exercise failing, removing and re-adding paths
if [ "$MULTIPATH" != "yes" ]; then
echo -ne 'skipping... '
exit 0
fi
mdadm -CR $md1 -l multipath -n2 $path0 $path1
testdev $md1 1 $mdsize12 1
mdadm $md1 -f $path0
rotest $md1
testdev $md1 1 $mdsize12 1
mdadm $md1 -r $path0
mdadm $md1 -a $path0
rotest $md1
testdev $md1 1 $mdsize12 1
mdadm $md1 -f $path1
mdadm $md1 -r $path1
rotest $md1
testdev $md1 1 $mdsize12 1
mdadm -S $md1

13
tests/00names Normal file

@ -0,0 +1,13 @@
set -x -e
# create arrays with non-numeric names
conf=$targetdir/mdadm.conf
echo "CREATE names=yes" > $conf
for i in linear raid0 raid1 raid4 raid5 raid6
do
mdadm -CR --config $conf /dev/md/$i -l $i -n 4 $dev4 $dev3 $dev2 $dev1
check $i
[ -d /sys/class/block/md_$i/md ]
mdadm -S md_$i
done

43
tests/00raid0 Normal file

@ -0,0 +1,43 @@
# create a simple raid0
mdadm -CR $md0 -l raid0 -n3 $dev0 $dev1 $dev2
check raid0
testdev $md0 3 $mdsize2_l 512
mdadm -S $md0
# now with version-0.90 superblock
mdadm -CR $md0 -e0.90 -l0 -n4 $dev0 $dev1 $dev2 $dev3
check raid0
testdev $md0 4 $mdsize0 512
mdadm -S $md0
# now with no superblock
mdadm -B $md0 -l0 -n5 $dev0 $dev1 $dev2 $dev3 $dev4
check raid0
testdev $md0 5 $size 512
mdadm -S $md0
# now same again with different chunk size
for chunk in 4 32 256
do
mdadm -CR $md0 -e0.90 -l raid0 --chunk $chunk -n3 $dev0 $dev1 $dev2
check raid0
testdev $md0 3 $mdsize0 $chunk
mdadm -S $md0
# now with version-1 superblock
mdadm -CR $md0 -e1.0 -l0 -c $chunk -n4 $dev0 $dev1 $dev2 $dev3
check raid0
testdev $md0 4 $mdsize1 $chunk
mdadm -S $md0
# now with no superblock
mdadm -B $md0 -l0 -n5 --chunk=$chunk $dev0 $dev1 $dev2 $dev3 $dev4
check raid0
testdev $md0 5 $size $chunk
mdadm -S $md0
done
exit 0

38
tests/00raid1 Normal file

@ -0,0 +1,38 @@
# create a simple mirror
# test version0, version1, and no super
# test resync and recovery.
# It's just a sanity check. This command shouldn't run successfully
mdadm -CR $md0 -l 1 -n2 missing missing
check opposite_result
mdadm -CR $md0 -l 1 -n2 $dev0 $dev1
check resync
check raid1
testdev $md0 1 $mdsize1a 64
mdadm -S $md0
# now with version-0.90 superblock, spare
mdadm -CR $md0 -e0.90 --level=raid1 -n3 -x2 $dev0 missing missing $dev1 $dev2
check recovery
check raid1
testdev $md0 1 $mdsize0 64
mdadm -S $md0
# now with no superblock
mdadm -B $md0 -l mirror -n2 $dev0 $dev1
check resync
check raid1
testdev $md0 1 $size 1
mdadm -S $md0
# again, but with no resync
mdadm -B $md0 -l 1 --assume-clean -n2 $dev0 $dev1
check raid1
check nosync
testdev $md0 1 $size 1
mdadm -S $md0
exit 0

18
tests/00raid10 Normal file

@ -0,0 +1,18 @@
# Create some raid10 arrays, all with 6 devices and one spare
devs="$dev0 $dev1 $dev2 $dev3 $dev4 $dev5 $dev6"
for lo in n2 n3 f2 f3
do
cm=1
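# m = expected capacity in device-multiples, cm = chunk multiple for testdev:
# with 6 disks, 2-copy layouts keep 3 devices' worth (m=3), 3-copy layouts 2;
# for 'far' layouts the size is assumed to round to chunk*copies, hence cm.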
case $lo in
f2 ) m=3 cm=2;;
f3 ) m=2 cm=3;;
n2 ) m=3;;
n3 ) m=2;;
esac
mdadm --create --run --level=raid10 --layout $lo --raid-disks 6 -x 1 $md0 $devs
check resync ; check raid10
testdev $md0 $m $mdsize1 $[512*cm]
mdadm -S $md0
done

16
tests/00raid4 Normal file

@ -0,0 +1,16 @@
# create a simple raid4 set
mdadm -CfR $md0 -l 4 -n3 $dev0 $dev1 $dev2
check resync ; check raid[45]
testdev $md0 2 $mdsize1 512
mdadm -S $md0
# now with version-1 superblock
mdadm -CR $md0 -e1 --level=raid4 -n4 $dev0 $dev1 $dev2 $dev3
check recovery; check raid[45]
testdev $md0 3 $mdsize1 512
mdadm -S $md0
exit 0

33
tests/00raid5 Normal file

@ -0,0 +1,33 @@
# create a simple raid5 set
mdadm -CfR $md0 -e 0.90 -l 5 -n3 $dev0 $dev1 $dev2
check resync
testdev $md0 2 $mdsize0 512
mdadm -S $md0
# now with version-1 superblock
mdadm -CR $md0 -e1 --level=raid5 -n4 $dev0 $dev1 $dev2 $dev3
check recovery
testdev $md0 3 $mdsize1 512
mdadm -S $md0
# now same again with explicit layout
for lo in la ra left-symmetric right-symmetric
do
mdadm -CfR $md0 -l 5 -p $lo -n3 $dev0 $dev1 $dev2
check resync ; check raid5
testdev $md0 2 $mdsize1 512
mdadm -S $md0
# now with version-1 superblock
mdadm -CR $md0 -e1 --level=raid5 --layout $lo -n4 $dev0 $dev1 $dev2 $dev3
check recovery ; check raid5
testdev $md0 3 $mdsize1 512
mdadm -S $md0
done
exit 0

16
tests/00raid6 Normal file

@ -0,0 +1,16 @@
# create a simple raid6 set
mdadm -CfR $md0 -e0.90 -l 6 -n4 $dev0 $dev1 $dev2 $dev3
check resync ; check raid6
testdev $md0 2 $mdsize0 512
mdadm -S $md0
# now with version-1 superblock
mdadm -CR $md0 -e1 --level=raid6 -n5 $dev0 $dev1 $dev2 $dev3 $dev4
check resync ; check raid6
testdev $md0 3 $mdsize1 512
mdadm -S $md0
exit 0

22
tests/00readonly Normal file

@ -0,0 +1,22 @@
#!/bin/bash
for metadata in 0.9 1.0 1.1 1.2
do
for level in linear raid0 raid1 raid4 raid5 raid6 raid10
do
mdadm -CR $md0 -l $level -n 4 --metadata=$metadata \
$dev1 $dev2 $dev3 $dev4 --assume-clean
check nosync
check $level
mdadm -ro $md0
check readonly
state=$(cat /sys/block/md0/md/array_state)
[ "$state" == "readonly" ] ||
die "array_state should be 'readonly', but is $state"
mdadm -w $md0
check $level
mdadm -S $md0
done
done
exit 0

29
tests/01r1fail Normal file

@ -0,0 +1,29 @@
# create a raid1, fail and remove a drive during initial sync
# Add two more, fail and remove one
# wait for sync to complete, fail, remove, re-add
mdadm -CR $md0 -l1 -n4 $dev0 $dev1 $dev2 missing
check resync
mdadm $md0 --fail $dev2
check resync
mdadm $md0 --fail $dev1
sleep 1
check nosync
check state U___
mdadm $md0 --add $dev4 $dev3
check recovery
# there could be two separate recoveries, one for each dev
check wait
check wait
mdadm $md0 --remove $dev2 $dev1
check nosync
check state UUU_
mdadm --zero-superblock $dev2
mdadm $md0 -a $dev2
check recovery
check wait
check state UUUU
mdadm -S $md0

27
tests/01r5fail Normal file

@ -0,0 +1,27 @@
# create a raid5, fail and remove a drive during initial sync
# Add two more, fail and remove one
# wait for sync to complete, fail, remove, re-add
mdadm -CR $md0 -l5 -n4 $dev0 $dev1 $dev2 $dev3
check recovery
mdadm $md0 --fail $dev3
sleep 1
check nosync
check state UUU_
mdadm $md0 --add $dev4 $dev5
check recovery
check wait
mdadm $md0 --fail $dev0
mdadm $md0 --remove $dev3 $dev0
check recovery
check state _UUU
mdadm $md0 -a $dev3
check recovery
check wait
check state UUUU
mdadm -S $md0

33
tests/01r5integ Normal file

@ -0,0 +1,33 @@
# Check integrity of raid5 in degraded mode
# Create a 4 disk raid5, dump some data to it, and
# sha1sum it with each device failed
if [ "$INTEGRITY" != "yes" ]; then
echo -ne 'skipping... '
exit 0
fi
for layout in ls rs la ra
do
mdadm -CR $md0 -l5 --layout $layout -n4 $dev0 $dev1 $dev2 $dev3
check wait
tar cf - /etc > $md0
sum=`sha1sum $md0`
for i in $dev0 $dev1 $dev2 $dev3
do
mdadm $md0 -f $i
mdadm $md0 -r $i
blockdev --flushbufs $md0
sum1=`sha1sum $md0`
if [ "$sum" != "$sum1" ]
then
echo $sum does not match $sum1 with $i missing
exit 1
fi
mdadm $md0 -a $i
while ! (check state 'U*'); do check wait; sleep 0.2; done
done
mdadm -S $md0
done

57
tests/01raid6integ Normal file

@ -0,0 +1,57 @@
# Check integrity of raid6 in degraded modes
# Create a 5 disk raid6, dump some data to it, then
# sha1sum it with different pairs of devices failed
if [ "$INTEGRITY" != "yes" ]; then
echo -ne 'skipping... '
exit 0
fi
layouts='ls rs la ra'
lv=`uname -r`
if expr $lv '>=' 2.6.30 > /dev/null
then
layouts="$layouts parity-first ddf-zero-restart ddf-N-restart ddf-N-continue \
left-asymmetric-6 right-asymmetric-6 left-symmetric-6 right-symmetric-6 parity-first-6"
fi
for layout in $layouts
do
mdadm -CR $md0 -l6 --layout $layout -n5 $dev0 $dev1 $dev2 $dev3 $dev4
check wait
tar cf - /etc > $md0
sum=`sha1sum $md0`
totest=
for second in $dev0 $dev1 $dev2 $dev3 $dev4
do
mdadm $md0 -f $second
mdadm $md0 -r $second
blockdev --flushbufs $md0
sum1=`sha1sum $md0`
if [ "$sum" != "$sum1" ]
then
echo $sum does not match $sum1 with $second missing
exit 1
fi
for first in $totest
do
mdadm $md0 -f $first
mdadm $md0 -r $first
blockdev --flushbufs $md0
sum1=`sha1sum $md0`
if [ "$sum" != "$sum1" ]
then
echo $sum does not match $sum1 with $first and $second missing
exit 1
fi
mdadm $md0 -a $first
while ! (check state 'U*_U*'); do check wait; sleep 0.2; done
done
mdadm $md0 -a $second
while ! (check state 'U*'); do check wait; sleep 0.2; done
totest="$totest $second"
done
mdadm -S $md0
done

52
tests/01replace Normal file

@ -0,0 +1,52 @@
set -x -e
## test --replace for raid5 raid6 raid1 and raid10
#1/ after replace, can remove replaced device
#2/ after --replace-with cannot remove the 'with' device
#3/ preserve integrity with concurrent failure
for level in 1 5 6 10
do
dd if=/dev/zero of=$dev4 bs=1M || true
dd if=/dev/zero of=$dev5 bs=1M || true
mdadm -CR $md0 -l $level -n4 -x2 $devlist5
dd if=/dev/urandom of=$md0 bs=1M || true
sum=`sha1sum < $md0`
check wait
mdadm $md0 --replace $dev1
check wait
mdadm $md0 --remove $dev1
mdadm $md0 --remove $dev5 && exit 1
mdadm -S $md0
dd if=/dev/zero of=$dev4 bs=1M || true
dd if=/dev/zero of=$dev5 bs=1M || true
mdadm -CR $md0 -l $level -n4 -x2 $devlist5
check wait
sum1=`sha1sum < $md0`
[ "$sum" == "$sum1" ]
mdadm $md0 --replace $dev1 --with $dev4
check wait
mdadm $md0 --remove $dev1
mdadm $md0 --remove $dev5
mdadm $md0 --remove $dev4 && exit 1
mdadm $md0 --add $dev1 $dev5
mdadm $md0 --replace $dev0
sleep 1
mdadm $md0 --fail $dev2
check wait
sum2=`sha1sum < $md0`
[ "$sum" == "$sum2" ]
mdadm $md0 --remove $dev0 $dev2
mdadm $md0 --add $dev0 $dev2
mdadm $md0 --replace $dev3
sleep 1
mdadm $md0 --fail $dev0 $dev2
check wait
sum3=`sha1sum < $md0`
[ "$sum" == "$sum3" ]
mdadm -S $md0
done

23
tests/02lineargrow Normal file

@ -0,0 +1,23 @@
# create a linear array, and add more drives to it.
for e in 0.90 1 1.1 1.2
do
case $e in
0.90 ) sz=$mdsize0 ;;
1 ) sz=$mdsize2_l ;;
1.0 ) sz=$mdsize1 ;;
1.1 ) sz=$mdsize1_l ;;
1.2 ) sz=$mdsize2_l ;;
esac
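# sz differs because each version stores its superblock differently:
# 0.90 and 1.0 at the end of the device, 1.1 at the start, 1.2 4K in.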
mdadm -CRf $md0 --level linear -e $e --raid-disks=1 $dev1
testdev $md0 1 $sz 1
mdadm --grow $md0 --add $dev2
testdev $md0 2 $sz 1
mdadm --grow $md0 --add $dev3
testdev $md0 3 $sz 1
mdadm -S $md0
done

40
tests/02r1add Normal file

@ -0,0 +1,40 @@
# Make a raid1 with a spare, grow it onto the spare, then shrink it again.
mdadm -CR $md0 -l1 -n2 -x1 $dev0 $dev1 $dev2
check resync
check wait
check state UU
mdadm --grow $md0 -n 3
check recovery
check wait
check state UUU
mdadm $md0 --fail $dev0
check state _UU
mdadm --grow $md0 -n 2
check state UU
mdadm -S $md0
# same again for version-1
mdadm -CR $md0 -l1 -n2 -e1.2 -x1 $dev0 $dev1 $dev2
check resync
check wait
check state UU
mdadm --grow $md0 -n 3
check recovery
check wait
check state UUU
mdadm $md0 --fail $dev0
check state _UU
mdadm --grow $md0 -n 2
check state UU
mdadm -S $md0

36
tests/02r1grow Normal file

@ -0,0 +1,36 @@
# create a small raid1 array, make it larger. Then make it smaller
mdadm -CR $md0 -e 0.90 --level raid1 --raid-disks 3 --size $[size/2] $dev1 $dev2 $dev3
check wait
check state UUU
testdev $md0 1 $[size/2] 1
mdadm --grow $md0 --size max
check resync
check wait
testdev $md0 1 $mdsize0 1
mdadm --grow $md0 --size $[size/2]
check nosync
testdev $md0 1 $[size/2] 1
mdadm -S $md0
# same again with version 1.1 superblock
mdadm -CR $md0 --level raid1 --metadata=1.1 --raid-disks 3 --size $[size/2] $dev1 $dev2 $dev3
check wait
check state UUU
testdev $md0 1 $[size/2] 1
mdadm --grow $md0 --size max
check resync
check wait
testdev $md0 1 $mdsize1_l 1
mdadm --grow $md0 --size $[size/2]
check nosync
testdev $md0 1 $[size/2] 1
mdadm -S $md0

53
tests/02r5grow Normal file

@ -0,0 +1,53 @@
# create a small raid5 array, make it larger. Then make it smaller
mdadm -CR $md0 -e0.90 --level raid5 --chunk=64 --raid-disks 3 --size $[size/2] $dev1 $dev2 $dev3
check wait
check state UUU
testdev $md0 2 $[size/2] 32
mdadm --grow $md0 --size max
check resync
check wait
testdev $md0 2 $mdsize0 32
mdadm --grow $md0 --size $[size/2]
check nosync
testdev $md0 2 $[size/2] 32
mdadm -S $md0
# same again with version 1.1 superblock
mdadm -CR $md0 --level raid5 --metadata=1.1 --chunk=128 --raid-disks 4 --size $[size/2] $dev1 $dev2 $dev3 $dev4
check wait
check state UUUU
testdev $md0 3 $[size/2] 128
mdadm --grow $md0 --size max
check resync
check wait
testdev $md0 3 $[mdsize1_l] 128
mdadm --grow $md0 --size $[size/2]
check nosync
testdev $md0 3 $[size/2] 128
mdadm -S $md0
# create a raid5 array and change the chunk
mdadm -CR $md0 --level raid5 --metadata=1.1 --chunk=32 --raid-disks 3 --size $[size/2] $dev1 $dev2 $dev3
check wait
check state UUU
check chunk 32
mdadm $md0 --grow --chunk=64
check reshape
check wait
check chunk 64
mdadm -S $md0
mdadm -A $md0 $dev1 $dev2 $dev3
check state UUU
check chunk 64
mdadm -S $md0

36
tests/02r6grow Normal file

@ -0,0 +1,36 @@
# create a small raid6 array, make it larger. Then make it smaller
mdadm -CR $md0 -e 0.90 --level raid6 --chunk=64 --raid-disks 4 --size $[size/2] $dev1 $dev2 $dev3 $dev4
check wait
check state UUUU
testdev $md0 2 $[size/2] 32
mdadm --grow $md0 --size max
check resync
check wait
testdev $md0 2 $mdsize0 32
mdadm --grow $md0 --size $[size/2]
check nosync
testdev $md0 2 $[size/2] 32
mdadm -S $md0
# same again with version 1.1 superblock
mdadm -CR $md0 --level raid6 --metadata=1.1 --chunk=128 --raid-disks 4 --size $[size/2] $dev1 $dev2 $dev3 $dev4
check wait
check state UUUU
testdev $md0 2 $[size/2] 128
mdadm --grow $md0 --size max
check resync
check wait
testdev $md0 2 $[mdsize1_l] 128
mdadm --grow $md0 --size $[size/2]
check nosync
testdev $md0 2 $[size/2] 128
mdadm -S $md0

17
tests/03assem-incr Normal file

@ -0,0 +1,17 @@
set -x -e
# Test interaction between -I and -A
# there are locking issue too, but those are hard to test for.
#
# Here just test that a partly "-I" assembled array can
# be completed with "-A"
for l in 0 1 5 linear
do
mdadm -CR $md0 -l $l -n5 $dev0 $dev1 $dev2 $dev3 $dev4 --assume-clean
mdadm -S $md0
mdadm -I $dev1
mdadm -I $dev3
mdadm -A /dev/md0 $dev0 $dev1 $dev2 $dev3 $dev4
mdadm -S /dev/md0
done

137
tests/03r0assem Normal file

@ -0,0 +1,137 @@
# create a raid0 array from 3 devices, and assemble it in a multitude of ways.
# explicitly list devices
# uuid, md-minor on command line with wildcard devices
# mdadm.conf file
mdadm -CR $md2 -l0 -n3 $dev0 $dev1 $dev2
check raid0
tst="testdev $md2 3 $mdsize1_l 512"
$tst
uuid=`mdadm -Db $md2 | sed 's/.*UUID=//'`
mdadm -S $md2
mdadm -A $md2 $dev0 $dev1 $dev2
$tst
mdadm -S $md2
mdadm -A $md2 -u $uuid $devlist
$tst
mdadm -S $md2
mdadm --assemble $md2 --name=2 $devlist
$tst
mdadm -S $md2
conf=$targetdir/mdadm.conf
{
echo DEVICE $devlist
echo array $md2 UUID=$uuid
} > $conf
mdadm -As -c $conf $md2
$tst
mdadm -S $md2
{
echo DEVICE $devlist
echo array $md2 name=2
} > $conf
mdadm -As -c $conf $md2
$tst
mdadm -S $md2
{
echo DEVICE $devlist
echo array $md2 devices=$dev0,$dev1,$dev2
} > $conf
mdadm -As -c $conf $md2
$tst
echo "DEVICE $devlist" > $conf
mdadm -Db $md2 >> $conf
mdadm -S $md2
mdadm --assemble --scan --config=$conf $md2
$tst
mdadm -S $md2
echo " metadata=0.90 devices=$dev0,$dev1,$dev2" >> $conf
mdadm --assemble --scan --config=$conf $md2
$tst
mdadm -S $md2
### Now for version 0...
mdadm --zero-superblock $dev0 $dev1 $dev2
mdadm -CR $md2 -l0 --metadata=0.90 -n3 $dev0 $dev1 $dev2
check raid0
tst="testdev $md2 3 $mdsize0 512"
$tst
uuid=`mdadm -Db $md2 | sed 's/.*UUID=//'`
mdadm -S $md2
mdadm -A $md2 $dev0 $dev1 $dev2
$tst
mdadm -S $md2
mdadm -A $md2 -u $uuid $devlist
$tst
mdadm -S $md2
mdadm --assemble $md2 --super-minor=2 $devlist #
$tst
mdadm -S $md2
conf=$targetdir/mdadm.conf
{
echo DEVICE $devlist
echo array $md2 UUID=$uuid
} > $conf
mdadm -As -c $conf $md2
$tst
mdadm -S $md2
{
echo DEVICE $devlist
echo array $md2 super-minor=2
} > $conf
mdadm -As -c $conf $md2
$tst
mdadm -S $md2
{
echo DEVICE $devlist
echo array $md2 devices=$dev0,$dev1,$dev2
} > $conf
mdadm -As -c $conf $md2
$tst
echo "DEVICE $devlist" > $conf
mdadm -Db $md2 >> $conf
mdadm -S $md2
mdadm --assemble --scan --config=$conf $md2
$tst
mdadm -S $md2
echo " metadata=1 devices=$dev0,$dev1,$dev2" >> $conf
mdadm --assemble --scan --config=$conf $md2
$tst
mdadm -S $md2
# Now use incremental assembly.
mdadm -I --config=$conf $dev0
mdadm -I --config=$conf $dev1
mdadm -I --config=$conf $dev2
$tst
mdadm -S $md2

109
tests/03r5assem Normal file

@ -0,0 +1,109 @@
# create a raid5 array and assemble it in various ways,
# including with missing devices.
mdadm -CR -e 0.90 $md1 -l5 -n3 $dev0 $dev1 $dev2
tst="check raid5 ;testdev $md1 2 $mdsize0 512 ; mdadm -S $md1"
uuid=`mdadm -Db $md1 | sed 's/.*UUID=//'`
check wait
eval $tst
mdadm -A $md1 $dev0 $dev1 $dev2
eval $tst
mdadm -A $md1 -u $uuid $devlist
eval $tst
mdadm -A $md1 -m 1 $devlist
eval $tst
conf=$targetdir/mdadm.conf
{
echo DEVICE $devlist
echo array $md1 UUID=$uuid
} > $conf
mdadm -As -c $conf $md1
eval $tst
{
echo DEVICE $devlist
echo array $md1 super-minor=1
} > $conf
mdadm -As -c $conf
eval $tst
{
echo DEVICE $devlist
echo array $md1 devices=$dev0,$dev1,$dev2
} > $conf
mdadm -As -c $conf
echo "DEVICE $devlist" > $conf
mdadm -Db $md1 >> $conf
eval $tst
mdadm --assemble --scan --config=$conf $md1
eval $tst
echo " metadata=0.90 devices=$dev0,$dev1,$dev2" >> $conf
mdadm --assemble --scan --config=$conf $md1
eval $tst
### Now with a missing device
mdadm -AR $md1 $dev0 $dev2 #
check state U_U
eval $tst
mdadm -A $md1 -u $uuid $devlist
check state U_U
eval $tst
mdadm -A $md1 -m 1 $devlist
check state U_U
eval $tst
conf=$targetdir/mdadm.conf
{
echo DEVICE $devlist
echo array $md1 UUID=$uuid
} > $conf
mdadm -As -c $conf $md1
check state U_U
eval $tst
{
echo DEVICE $devlist
echo array $md1 super-minor=1
} > $conf
mdadm -As -c $conf
check state U_U
eval $tst
{
echo DEVICE $devlist
echo array $md1 devices=$dev0,$dev1,$dev2
} > $conf
mdadm -As -c $conf
echo "DEVICE $devlist" > $conf
mdadm -Db $md1 >> $conf
check state U_U
eval $tst
mdadm --assemble --scan --config=$conf $md1
check state U_U
eval $tst
echo " metadata=0.90 devices=$dev0,$dev1,$dev2" >> $conf
mdadm --assemble --scan --config=$conf $md1
check state U_U
eval $tst

12
tests/03r5assem-failed Normal file

@ -0,0 +1,12 @@
# Create an array, fail one device while array is active, stop array,
# then re-assemble listing the failed device first.
mdadm -CR $md1 -l5 -n4 $dev0 $dev1 $dev2 $dev3
check wait
echo 2000 > /sys/block/md1/md/safe_mode_delay
mkfs $md1
mdadm $md1 -f $dev0
mdadm -S $md1
mdadm -A $md1 $dev0 $dev1 $dev2 $dev3 || exit 1

128
tests/03r5assemV1 Normal file

@ -0,0 +1,128 @@
# create a v-1 raid5 array and assemble in various ways
mdadm -CR -e1 --name one $md1 -l5 -n3 -x2 $dev0 $dev1 $dev2 $dev3 $dev4
tst="check raid5 ;testdev $md1 2 $mdsize1 512 ; mdadm -S $md1"
uuid=`mdadm -Db $md1 | sed 's/.*UUID=//'`
check wait
eval $tst
mdadm -A $md1 $dev0 $dev1 $dev2
mdadm $md1 --add $dev3 $dev4
check spares 2
eval $tst
mdadm -A $md1 -u $uuid $devlist
check spares 2
eval $tst
mdadm -A $md1 --name one $devlist
check spares 2
eval $tst
conf=$targetdir/mdadm.conf
{
echo DEVICE $devlist
echo array $md1 UUID=$uuid
} > $conf
mdadm -As -c $conf $md1
eval $tst
{
echo DEVICE $devlist
echo array $md1 name=one
} > $conf
mdadm -As -c $conf
eval $tst
{
echo DEVICE $devlist
echo array $md1 devices=$dev0,$dev1,$dev2,$dev3,$dev4
} > $conf
mdadm -As -c $conf
echo "DEVICE $devlist" > $conf
mdadm -Db $md1 >> $conf
eval $tst
mdadm --assemble --scan --config=$conf $md1
eval $tst
echo PING >&2
echo " metadata=1.0 devices=$dev0,$dev1,$dev2,$dev3,$dev4" >> $conf
mdadm --assemble --scan --config=$conf $md1
eval $tst
### Now with a missing device
# We don't want the recovery to complete while we are
# messing about here.
echo 100 > /proc/sys/dev/raid/speed_limit_max
echo 100 > /proc/sys/dev/raid/speed_limit_min
mdadm -AR $md1 $dev0 $dev2 $dev3 $dev4 #
check state U_U
check spares 1
eval $tst
mdadm -A $md1 -u $uuid $devlist
check state U_U
eval $tst
mdadm -A $md1 --name=one $devlist
check state U_U
check spares 1
eval $tst
conf=$targetdir/mdadm.conf
{
echo DEVICE $devlist
echo array $md1 UUID=$uuid
} > $conf
mdadm -As -c $conf $md1
check state U_U
eval $tst
{
echo DEVICE $devlist
echo array $md1 name=one
} > $conf
mdadm -As -c $conf
check state U_U
eval $tst
{
echo DEVICE $devlist
echo array $md1 devices=$dev0,$dev1,$dev2
} > $conf
mdadm -As -c $conf
echo "DEVICE $devlist" > $conf
mdadm -Db $md1 >> $conf
check state U_U
eval $tst
mdadm --assemble --scan --config=$conf $md1
check state U_U
eval $tst
echo " metadata=1.0 devices=$dev0,$dev1,$dev2" >> $conf
mdadm --assemble --scan --config=$conf $md1
check state U_U
eval $tst
# And now assemble with -I
mdadm -Ss
mdadm -I -c $conf $dev0
mdadm -I -c $conf $dev1
mdadm -I -c $conf $dev2
eval $tst
echo 2000 > /proc/sys/dev/raid/speed_limit_max
echo 1000 > /proc/sys/dev/raid/speed_limit_min

20
tests/04r0update Normal file

@ -0,0 +1,20 @@
# create a raid0, re-assemble with a different super-minor
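# a plain re-assemble as md1 must keep the recorded minor (0); only
# --update=super-minor should rewrite it to 1 - hence '0 0 1' below.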
mdadm -CR -e 0.90 $md0 -l0 -n3 $dev0 $dev1 $dev2
testdev $md0 3 $mdsize0 512
minor1=`mdadm -E $dev0 | sed -n -e 's/.*Preferred Minor : //p'`
mdadm -S /dev/md0
mdadm -A $md1 $dev0 $dev1 $dev2
minor2=`mdadm -E $dev0 | sed -n -e 's/.*Preferred Minor : //p'`
mdadm -S /dev/md1
mdadm -A $md1 --update=super-minor $dev0 $dev1 $dev2
minor3=`mdadm -E $dev0 | sed -n -e 's/.*Preferred Minor : //p'`
mdadm -S /dev/md1
case "$minor1 $minor2 $minor3" in
"0 0 1" ) ;;
* ) echo >&2 "ERROR minors should be '0 0 1' but are '$minor1 $minor2 $minor3'"
exit 1
esac

15
tests/04r1update Normal file

@ -0,0 +1,15 @@
set -x
# create a raid1 array, let it sync, then re-assemble with a force-sync
mdadm -CR $md0 -l1 -n2 $dev0 $dev1
check wait
mdadm -S $md0
mdadm -A $md0 $dev0 $dev1
check nosync
mdadm -S $md0
mdadm -A $md0 -U resync $dev0 $dev1
check resync
mdadm -S $md0

18
tests/04r5swap Normal file

@ -0,0 +1,18 @@
# make a raid5 array, byte swap the superblocks, then assemble...
mdadm -CR $md0 -e 0.90 -l5 -n4 $dev0 $dev1 $dev2 $dev3
sleep 4
mdadm -S $md0
mdadm -E --metadata=0 $dev1 > $targetdir/d1
for d in $dev0 $dev1 $dev2 $dev3
do $dir/swap_super $d
done
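# swap_super (a helper built in the mdadm source tree) byte-swaps the 0.90
# superblock in place; -E with metadata=0.swap should then decode it the same.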
mdadm -E --metadata=0.swap $dev1 > $targetdir/d1s
diff -u $targetdir/d1 $targetdir/d1s
mdadm --assemble --update=byteorder $md0 $dev0 $dev1 $dev2 $dev3
sleep 3
check recovery
mdadm -S $md0

48
tests/04update-metadata Normal file

@ -0,0 +1,48 @@
set -xe
# test converting v0.90 to v1.0
# check for different levels
# check it fails for non-v0.90
# check it fails during reshape or recovery
# check it fails when bitmap is present
dlist="$dev0 $dev1 $dev2 $dev3"
for ls in raid0/4 linear/4 raid1/1 raid5/3 raid6/2
do
s=${ls#*/} l=${ls%/*}
mdadm -CR --assume-clean -e 0.90 $md0 --level $l -n 4 -c 64 $dlist
testdev $md0 $s 19904 64
mdadm -S $md0
mdadm -A $md0 --update=metadata $dlist
testdev $md0 $s 19904 64 check
mdadm -S $md0
done
if mdadm -A $md0 --update=metadata $dlist
then echo >&2 should fail with v1.0 metadata
exit 1
fi
mdadm -CR -e 0.90 $md0 --level=6 -n4 -c32 $dlist
mdadm -S $md0
if mdadm -A $md0 --update=metadata $dlist
then echo >&2 should fail during resync
exit 1
fi
mdadm -A $md0 $dlist
mdadm --wait $md0 || true
mdadm -S $md0
# should succeed now
mdadm -A $md0 --update=metadata $dlist
mdadm -S /dev/md0
mdadm -CR --assume-clean -e 0.90 $md0 --level=6 -n4 -c32 $dlist --bitmap=internal
mdadm -S $md0
if mdadm -A $md0 --update=metadata $dlist
then echo >&2 should fail when bitmap present
exit 1
fi

82
tests/04update-uuid Normal file

@ -0,0 +1,82 @@
set -x
# create an array, then change the uuid.
mdadm -CR --assume-clean $md0 -l5 -n3 $dev0 $dev1 $dev2
mdadm -S /dev/md0
mdadm -A /dev/md0 --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2
no_errors
mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
echo Wrong uuid; mdadm -D /dev/md0 ; exit 2;
}
mdadm -S /dev/md0
# try v1 superblock
mdadm -CR --assume-clean -e1 $md0 -l5 -n3 $dev0 $dev1 $dev2
mdadm -S /dev/md0
mdadm -A /dev/md0 --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2
no_errors
mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
echo Wrong uuid; mdadm -D /dev/md0 ; exit 2;
}
mdadm -S /dev/md0
# now if we have a bitmap, that needs updating too.
rm -f $targetdir/bitmap
mdadm -CR --assume-clean -b $targetdir/bitmap $md0 -l5 -n3 $dev0 $dev1 $dev2
mdadm -S /dev/md0
mdadm -A /dev/md0 -b $targetdir/bitmap --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2
no_errors
mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
echo Wrong uuid; mdadm -D /dev/md0 ; exit 2;
}
if mdadm -X $targetdir/bitmap | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 ||
mdadm -X $targetdir/bitmap | grep -s > /dev/null 67452301:efcdab89:98badcfe:10325476
then : ; else
echo Wrong uuid; mdadm -X $targetdir/bitmap ; exit 2;
fi
mdadm -S /dev/md0
# and bitmap for version1
rm -f $targetdir/bitmap
mdadm -CR --assume-clean -e1.1 -b $targetdir/bitmap $md0 -l5 -n3 $dev0 $dev1 $dev2
mdadm -S /dev/md0
mdadm -A /dev/md0 -b $targetdir/bitmap --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2
no_errors
mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
echo Wrong uuid; mdadm -D /dev/md0 ; exit 2;
}
# -X cannot tell which byteorder to use for the UUID, so allow both.
if mdadm -X $targetdir/bitmap | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 ||
mdadm -X $targetdir/bitmap | grep -s > /dev/null 67452301:efcdab89:98badcfe:10325476
then : ; else
echo Wrong uuid; mdadm -X $targetdir/bitmap ; exit 2;
fi
mdadm -S /dev/md0
# Internal bitmaps too.
mdadm -CR --assume-clean -b internal --bitmap-chunk 4 $md0 -l5 -n3 $dev0 $dev1 $dev2
mdadm -S /dev/md0
mdadm -A /dev/md0 --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2
no_errors
mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
echo Wrong uuid; mdadm -D /dev/md0 ; exit 2;
}
mdadm -X $dev0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
echo Wrong uuid; mdadm -X $dev0; exit 2;
}
mdadm -S /dev/md0
mdadm -CR --assume-clean -e1.2 -b internal --bitmap-chunk=4 $md0 -l5 -n3 $dev0 $dev1 $dev2
mdadm -S /dev/md0
mdadm -A /dev/md0 --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2
no_errors
mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
echo Wrong uuid; mdadm -D /dev/md0 ; exit 2;
}
mdadm -X $dev0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
echo Wrong uuid; mdadm -X $dev0; exit 2;
}
mdadm -S /dev/md0

20
tests/05r1-add-internalbitmap Normal file

@ -0,0 +1,20 @@
#
# create a raid1 without any bitmap, add the bitmap and then write to
# the device. This should catch the case where the bitmap is created
# but not reloaded correctly, such as the case fixed by
# 4474ca42e2577563a919fd3ed782e2ec55bf11a2
#
mdadm --create --run $md0 --metadata=0.9 --level=1 -n2 --delay=1 $dev1 $dev2
check wait
check nobitmap
testdev $md0 1 $mdsize1b 64
mdadm -Gb internal --bitmap-chunk=4 $md0
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0
# Re-assemble the array and verify the bitmap is still present
mdadm --assemble $md0 $dev1 $dev2
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0

20
tests/05r1-add-internalbitmap-v1a Normal file

@ -0,0 +1,20 @@
#
# create a raid1 without any bitmap, add the bitmap and then write to
# the device. This should catch the case where the bitmap is created
# but not reloaded correctly, such as the case fixed by
# 4474ca42e2577563a919fd3ed782e2ec55bf11a2
#
mdadm --create --run $md0 --metadata=1.0 --level=1 -n2 --delay=1 $dev1 $dev2
check wait
check nobitmap
testdev $md0 1 $mdsize1b 64
mdadm -Gb internal --bitmap-chunk=4 $md0
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0
# Re-assemble the array and verify the bitmap is still present
mdadm --assemble $md0 $dev1 $dev2
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0

20
tests/05r1-add-internalbitmap-v1b Normal file

@ -0,0 +1,20 @@
#
# create a raid1 without any bitmap, add the bitmap and then write to
# the device. This should catch the case where the bitmap is created
# but not reloaded correctly, such as the case fixed by
# 4474ca42e2577563a919fd3ed782e2ec55bf11a2
#
mdadm --create --run $md0 --metadata=1.1 --level=1 -n2 --delay=1 $dev1 $dev2
check wait
check nobitmap
testdev $md0 1 $mdsize1b 64
mdadm -Gb internal --bitmap-chunk=4 $md0
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0
# Re-assemble the array and verify the bitmap is still present
mdadm --assemble $md0 $dev1 $dev2
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0

20
tests/05r1-add-internalbitmap-v1c Normal file

@ -0,0 +1,20 @@
#
# create a raid1 without any bitmap, add the bitmap and then write to
# the device. This should catch the case where the bitmap is created
# but not reloaded correctly, such as the case fixed by
# 4474ca42e2577563a919fd3ed782e2ec55bf11a2
#
mdadm --create --run $md0 --metadata=1.2 --level=1 -n2 --delay=1 $dev1 $dev2
check wait
check nobitmap
testdev $md0 1 $mdsize1b 64
mdadm -Gb internal --bitmap-chunk=4 $md0
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0
# Re-assemble the array and verify the bitmap is still present
mdadm --assemble $md0 $dev1 $dev2
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0

49
tests/05r1-bitmapfile Normal file

@ -0,0 +1,49 @@
#
# create a raid1 with a bitmap file
#
bmf=$targetdir/bitmap
rm -f $bmf
mdadm --create --run $md0 --level=1 -n2 --delay=1 --bitmap $bmf $dev1 $dev2
check wait
testdev $md0 1 $mdsize1a 64
mdadm -S $md0
mdadm --assemble $md0 --bitmap=$bmf $dev1 $dev2
testdev $md0 1 $mdsize1a 64
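# with --delay=1 the write-intent bits should drain within seconds: dirty1
# (sampled right after writing) must be large, dirty2 (after 4s idle) zero.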
dirty1=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty2=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
exit 1
fi
mdadm $md0 -f $dev1
testdev $md0 1 $mdsize1a 64
sleep 4
dirty3=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty3 -lt 400 ]
then
echo >&2 "ERROR dirty count $dirty3 is too small"
exit 2
fi
mdadm -S $md0
mdadm --assemble -R $md0 --bitmap=$bmf $dev2
dirty4=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
mdadm --zero $dev1 # force --add, not --re-add
mdadm $md0 --add $dev1
#it is too fast# check recovery
check wait
sleep 4
dirty5=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
then echo echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
exit 1
fi
mdadm -S $md0

74
tests/05r1-failfast Normal file

@ -0,0 +1,74 @@
# create a simple mirror and check failfast flag works
mdadm -CR $md0 -e1.2 --level=raid1 --failfast -n2 $dev0 $dev1
check raid1
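# grep -v prints any rd*/state line lacking 'failfast', so a successful
# grep means some member is missing the flag.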
if grep -v failfast /sys/block/md0/md/rd*/state > /dev/null
then
die "failfast missing"
fi
# Removing works with the failfast flag
mdadm $md0 -f $dev0
mdadm $md0 -r $dev0
if grep -v failfast /sys/block/md0/md/rd1/state > /dev/null
then
die "failfast missing"
fi
# Adding works with the failfast flag
mdadm $md0 -a --failfast $dev0
check wait
if grep -v failfast /sys/block/md0/md/rd0/state > /dev/null
then
die "failfast missing"
fi
mdadm -S $md0
# Assembling works with the failfast flag
mdadm -A $md0 $dev0 $dev1
check raid1
if grep -v failfast /sys/block/md0/md/rd*/state > /dev/null
then
die "failfast missing"
fi
# Adding works with the nofailfast flag
mdadm $md0 -f $dev0
mdadm $md0 -r $dev0
mdadm $md0 -a --nofailfast $dev0
check wait
if grep failfast /sys/block/md0/md/rd0/state > /dev/null
then
die "failfast should be missing"
fi
# Assembling with one faulty slave works with the failfast flag
mdadm $md0 -f $dev0
mdadm $md0 -r $dev0
mdadm -S $md0
mdadm -A $md0 $dev0 $dev1
check raid1
mdadm -S $md0
# Spare works with the failfast flag
mdadm -CR $md0 -e1.2 --level=raid1 --failfast -n2 $dev0 $dev1
check raid1
mdadm $md0 -a --failfast $dev2
check wait
check spares 1
if grep -v failfast /sys/block/md0/md/rd*/state > /dev/null
then
die "failfast missing"
fi
# Grow works with the failfast flag
mdadm -G $md0 --raid-devices=3
check wait
if grep -v failfast /sys/block/md0/md/rd*/state > /dev/null
then
die "failfast missing"
fi
mdadm -S $md0
exit 0

33
tests/05r1-grow-external Normal file

@ -0,0 +1,33 @@
#
# create a raid1 array, add an external bitmap
#
mdadm --create --run $md0 -l 1 -n 2 $dev1 $dev2
check wait
testdev $md0 1 $mdsize1a 64
bmf=$targetdir/bm
rm -f $bmf
#mdadm -E $dev1
mdadm --grow $md0 --bitmap=$bmf --delay=1 || { mdadm -X $bmf ; exit 1; }
dirty1=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty2=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
testdev $md0 1 $mdsize1a 64
dirty3=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty4=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
#echo $dirty1 $dirty2 $dirty3 $dirty4
if [ $dirty2 -ne 0 -o $dirty4 -ne 0 -o $dirty3 -lt 400 ]
then
echo bad dirty counts
exit 1
fi
# now to remove the bitmap
check bitmap
mdadm --grow $md0 --bitmap=none
check nobitmap
mdadm -S $md0

31
tests/05r1-grow-internal Normal file

@ -0,0 +1,31 @@
#
# create a raid1 array, add an internal bitmap
#
mdadm --create --run $md0 -l 1 -n 2 $dev1 $dev2
check wait
testdev $md0 1 $mdsize1a 64
#mdadm -E $dev1
mdadm --grow $md0 --bitmap=internal --bitmap-chunk=4 --delay=1 || { mdadm -X $dev2 ; exit 1; }
dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
testdev $md0 1 $mdsize1a 64
dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
#echo $dirty1 $dirty2 $dirty3 $dirty4
if [ $dirty2 -ne 0 -o $dirty4 -ne 0 -o $dirty3 -lt 400 ]
then
echo bad dirty counts
exit 1
fi
# now to remove the bitmap
check bitmap
mdadm --grow $md0 --bitmap=none
check nobitmap
mdadm -S $md0

31
tests/05r1-grow-internal-1 Normal file

@ -0,0 +1,31 @@
#
# create a raid1 array, version 1 superblock, add an internal bitmap
#
mdadm --create --run $md0 -e1 -l 1 -n 2 $dev1 $dev2
check wait
testdev $md0 1 $mdsize1b 64
#mdadm -E $dev1
mdadm --grow $md0 --bitmap=internal --bitmap-chunk=4 --delay=1
dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
testdev $md0 1 $mdsize1b 64
dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
#echo $dirty1 $dirty2 $dirty3 $dirty4
if [ $dirty2 -ne 0 -o $dirty4 -ne 0 -o $dirty3 -lt 400 ]
then
echo bad dirty counts
exit 1
fi
# now to remove the bitmap
check bitmap
mdadm --grow $md0 --bitmap=none
check nobitmap
mdadm -S $md0

47
tests/05r1-internalbitmap Normal file

@ -0,0 +1,47 @@
#
# create a raid1 with an internal bitmap
#
mdadm --create -e0.90 --run $md0 --level=1 -n2 --delay=1 --bitmap internal --bitmap-chunk=4 $dev1 $dev2
check wait
testdev $md0 1 $mdsize0 64
mdadm -S $md0
mdadm --assemble $md0 $dev1 $dev2
testdev $md0 1 $mdsize0 64
dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
exit 1
fi
mdadm $md0 -f $dev1
testdev $md0 1 $mdsize0 64
sleep 4
dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty3 -lt 400 ]
then
echo >&2 "ERROR dirty count $dirty3 is too small"
exit 2
fi
mdadm -S $md0
mdadm --assemble -R $md0 $dev2
mdadm --zero-superblock $dev1
mdadm $md0 --add $dev1
check recovery
dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
check wait
sleep 4
dirty5=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
then echo echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
exit 1
fi
mdadm -S $md0

48
tests/05r1-internalbitmap-v1a Normal file

@ -0,0 +1,48 @@
#
# create a raid1 with an internal bitmap
#
mdadm --create --run $md0 --metadata=1.0 --level=1 -n2 --delay=1 --bitmap internal --bitmap-chunk=4 $dev1 $dev2
check wait
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0
mdadm --assemble $md0 $dev1 $dev2
testdev $md0 1 $mdsize1b 64
dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
exit 1
fi
mdadm $md0 -f $dev1
testdev $md0 1 $mdsize1b 64
sleep 4
dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty3 -lt 400 ]
then
echo >&2 "ERROR dirty count $dirty3 is too small"
exit 2
fi
mdadm -S $md0
mdadm --zero-superblock $dev1
mdadm --assemble -R $md0 $dev2
mdadm $md0 --add $dev1
check recovery
dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
check wait
sleep 4
dirty5=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
then echo echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
exit 1
fi
mdadm -S $md0

49
tests/05r1-internalbitmap-v1b Normal file

@ -0,0 +1,49 @@
#
# create a raid1 with an internal bitmap
#
mdadm --create --run $md0 --metadata=1.1 --level=1 -n2 --delay=1 --bitmap internal --bitmap-chunk=4 $dev1 $dev2
check wait
check bitmap
testdev $md0 1 $mdsize11 64
mdadm -S $md0
mdadm --assemble $md0 $dev1 $dev2
check bitmap
testdev $md0 1 $mdsize11 64
dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
exit 1
fi
mdadm $md0 -f $dev1
testdev $md0 1 $mdsize11 64
sleep 4
dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty3 -lt 400 ]
then
echo >&2 "ERROR dirty count $dirty3 is too small"
exit 2
fi
mdadm -S $md0
mdadm --zero-superblock $dev1
mdadm --assemble -R $md0 $dev2
mdadm $md0 --add $dev1
check recovery
dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
check wait
sleep 4
dirty5=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
then echo echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
exit 1
fi
mdadm -S $md0

48
tests/05r1-internalbitmap-v1c Normal file

@ -0,0 +1,48 @@
#
# create a raid1 with an internal bitmap
#
mdadm --create --run $md0 --metadata=1.2 --level=1 -n2 --delay=1 --bitmap internal --bitmap-chunk 4 $dev1 $dev2
check wait
check bitmap
testdev $md0 1 $mdsize12 64
mdadm -S $md0
mdadm --assemble $md0 $dev1 $dev2
testdev $md0 1 $mdsize12 64
dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
exit 1
fi
mdadm $md0 -f $dev1
testdev $md0 1 $mdsize12 64
sleep 4
dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty3 -lt 400 ]
then
echo >&2 "ERROR dirty count $dirty3 is too small"
exit 2
fi
mdadm -S $md0
mdadm --zero-superblock $dev1
mdadm --assemble -R $md0 $dev2
mdadm $md0 --add $dev1
check recovery
dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
check wait
sleep 4
dirty5=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
then echo echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
exit 1
fi
mdadm -S $md0

53
tests/05r1-n3-bitmapfile Normal file

@ -0,0 +1,53 @@
#
# create a raid1 with 3 devices and a bitmap file
# make sure resync does right thing.
#
#
bmf=$targetdir/bitmap
rm -f $bmf
mdadm --create -e0.90 --run $md0 --level=1 -n3 --delay=1 --bitmap $bmf $dev1 $dev2 $dev3
check wait
testdev $md0 1 $mdsize0 64
mdadm -S $md0
mdadm --assemble $md0 --bitmap=$bmf $dev1 $dev2 $dev3
testdev $md0 1 $mdsize0 64
dirty1=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty2=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
exit 1
fi
mdadm $md0 -f $dev2
testdev $md0 1 $mdsize0 64
sleep 4
dirty3=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty3 -lt 400 ]
then
echo >&2 "ERROR dirty count $dirty3 is too small"
exit 2
fi
mdadm -S $md0
mdadm --assemble -R $md0 --bitmap=$bmf $dev1 $dev3
check nosync
mdadm --zero-superblock $dev2
mdadm $md0 --add $dev2
check recovery
dirty4=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
check wait
sleep 4
dirty5=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
then echo echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
exit 1
fi
mdadm -S $md0
exit 0

39
tests/05r1-re-add Normal file

@ -0,0 +1,39 @@
#
# create a raid1, remove a drive, and readd it.
# resync should be instant.
# Then do some IO first. Resync should still be very fast
#
mdadm -CR $md0 -l1 -n2 -binternal --bitmap-chunk=4 -d1 $dev1 $dev2
check resync
check wait
testdev $md0 1 $mdsize1a 64
sleep 4
mdadm $md0 -f $dev2
sleep 1
mdadm $md0 -r $dev2
mdadm $md0 -a $dev2
#cat /proc/mdstat
check nosync
mdadm $md0 -f $dev2
sleep 1
mdadm $md0 -r $dev2
testdev $md0 1 $mdsize1a 64
mdadm $md0 -a $dev2
check wait
blockdev --flushbufs $dev1 $dev2
cmp --ignore-initial=$[64*512] --bytes=$[$mdsize0*1024] $dev1 $dev2
mdadm $md0 -f $dev2; sleep 1
mdadm $md0 -r $dev2
if dd if=/dev/zero of=$md0 ; then : ; fi
blockdev --flushbufs $md0 # ensure writes have been sent.
mdadm $md0 -a $dev2
check recovery
check wait
blockdev --flushbufs $dev1 $dev2
cmp --ignore-initial=$[64*512] --bytes=$[$mdsize0*1024] $dev1 $dev2
mdadm -S $md0

38
tests/05r1-re-add-nosuper Normal file

@ -0,0 +1,38 @@
#
# create a raid1, remove a drive, and readd it.
# resync should be instant.
# Then do some IO first. Resync should still be very fast
#
bmf=$targetdir/bitmap2
rm -f $bmf
mdadm -B $md0 -l1 -n2 -b$bmf -d1 $dev1 $dev2
check resync
check wait
testdev $md0 1 $size 1
sleep 4
mdadm $md0 -f $dev2
sleep 1
mdadm $md0 -r $dev2
mdadm $md0 --re-add $dev2
check nosync
mdadm $md0 -f $dev2
sleep 1
mdadm $md0 -r $dev2
testdev $md0 1 $size 1
mdadm $md0 --re-add $dev2
check wait
cmp --bytes=$[$mdsize0*1024] $dev1 $dev2
mdadm $md0 -f $dev2; sleep 1
mdadm $md0 -r $dev2
if dd if=/dev/zero of=$md0 ; then : ; fi
blockdev --flushbufs $md0 # make sure writes have been sent
mdadm $md0 --re-add $dev2
check recovery
check wait
# should BLKFLSBUF and then read $dev1/$dev2...
cmp --bytes=$[$mdsize0*1024] $file1 $file2
mdadm -S $md0

18
tests/05r1-remove-internalbitmap Normal file

@ -0,0 +1,18 @@
#
# create a raid1 with bitmap, remove the bitmap and verify it is still
# gone when re-assembling the array
#
mdadm --create --run $md0 --metadata=0.9 --level=1 -n2 --bitmap internal --bitmap-chunk=4 --delay=1 $dev1 $dev2
check wait
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -Gb none $md0
check nobitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0
# Re-assemble the array and verify the bitmap is still gone
mdadm --assemble $md0 $dev1 $dev2
check nobitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0

18
tests/05r1-remove-internalbitmap-v1a Normal file

@ -0,0 +1,18 @@
#
# create a raid1 with bitmap, remove the bitmap and verify it is still
# gone when re-assembling the array
#
mdadm --create --run $md0 --metadata=1.0 --level=1 -n2 --bitmap internal --bitmap-chunk=4 --delay=1 $dev1 $dev2
check wait
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -Gb none $md0
check nobitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0
# Re-assemble the array and verify the bitmap is still gone
mdadm --assemble $md0 $dev1 $dev2
check nobitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0

18
tests/05r1-remove-internalbitmap-v1b Normal file

@ -0,0 +1,18 @@
#
# create a raid1 with bitmap, remove the bitmap and verify it is still
# gone when re-assembling the array
#
mdadm --create --run $md0 --metadata=1.1 --level=1 -n2 --bitmap internal --bitmap-chunk=4 --delay=1 $dev1 $dev2
check wait
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -Gb none $md0
check nobitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0
# Re-assemble the array and verify the bitmap is still gone
mdadm --assemble $md0 $dev1 $dev2
check nobitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0

18
tests/05r1-remove-internalbitmap-v1c Normal file

@ -0,0 +1,18 @@
#
# create a raid1 with bitmap, remove the bitmap and verify it is still
# gone when re-assembling the array
#
mdadm --create --run $md0 --metadata=1.2 --level=1 -n2 --bitmap internal --bitmap-chunk=4 --delay=1 $dev1 $dev2
check wait
check bitmap
testdev $md0 1 $mdsize1b 64
mdadm -Gb none $md0
check nobitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0
# Re-assemble the array and verify the bitmap is still gone
mdadm --assemble $md0 $dev1 $dev2
check nobitmap
testdev $md0 1 $mdsize1b 64
mdadm -S $md0

49
tests/05r5-bitmapfile Normal file

@ -0,0 +1,49 @@
#
# create a raid5 with a bitmap file
#
bmf=$targetdir/bitmap
rm -f $bmf
mdadm --create --run $md0 --level=5 -n3 --delay=1 --bitmap $bmf $dev1 $dev2 $dev3
check wait
testdev $md0 2 $mdsize1 512
mdadm -S $md0
mdadm --assemble $md0 --bitmap=$bmf $dev1 $dev2 $dev3
testdev $md0 2 $mdsize1 512
dirty1=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty2=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
exit 1
fi
mdadm $md0 -f $dev1
testdev $md0 2 $mdsize1 512
sleep 4
dirty3=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty3 -lt 400 ]
then
echo >&2 "ERROR dirty count $dirty3 is too small"
exit 2
fi
mdadm -S $md0
mdadm --assemble -R $md0 --bitmap=$bmf $dev2 $dev3
mdadm --zero $dev1 # force add, not re-add
mdadm $md0 --add $dev1
check recovery
dirty4=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
check wait
sleep 4
dirty5=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
then echo echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
exit 1
fi
mdadm -S $md0

47
tests/05r5-internalbitmap Normal file

@ -0,0 +1,47 @@
#
# create a raid5 with an internal bitmap
#
mdadm --create --run $md0 --level=5 -n3 --delay=1 --bitmap internal --bitmap-chunk=4 $dev1 $dev2 $dev3
check wait
testdev $md0 2 $mdsize1 512
mdadm -S $md0
mdadm --assemble $md0 $dev1 $dev2 $dev3
testdev $md0 2 $mdsize1 512
dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
exit 1
fi
mdadm $md0 -f $dev1
testdev $md0 2 $mdsize1 512
sleep 4
dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty3 -lt 400 ]
then
echo >&2 "ERROR dirty count $dirty3 is too small"
exit 2
fi
mdadm -S $md0
mdadm --assemble -R $md0 $dev2 $dev3
mdadm --zero $dev1 # force --add, not --re-add
mdadm $md0 --add $dev1
check recovery
dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
check wait
sleep 4
dirty5=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
then echo echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
exit 1
fi
mdadm -S $md0

49
tests/05r6-bitmapfile Normal file

@ -0,0 +1,49 @@
#
# create a raid6 with a bitmap file
#
bmf=$targetdir/bitmap
rm -f $bmf
mdadm --create --run $md0 --level=6 -n4 --delay=1 --bitmap $bmf $dev1 $dev2 $dev3 $dev4
check wait
testdev $md0 2 $mdsize1 512
mdadm -S $md0
mdadm --assemble $md0 --bitmap=$bmf $dev1 $dev2 $dev3 $dev4
testdev $md0 2 $mdsize1 512
dirty1=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
sleep 4
dirty2=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
exit 1
fi
mdadm $md0 -f $dev3
testdev $md0 2 $mdsize1 512
sleep 4
dirty3=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty3 -lt 400 ]
then
echo >&2 "ERROR dirty count $dirty3 is too small"
exit 2
fi
mdadm -S $md0
mdadm --assemble -R $md0 --bitmap=$bmf $dev1 $dev2 $dev4
mdadm --zero $dev3 # force --add, not --re-add
mdadm $md0 --add $dev3
check recovery
dirty4=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
check wait
sleep 4
dirty5=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
then echo echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
exit 1
fi
mdadm -S $md0

27
tests/05r6tor0 Normal file

@ -0,0 +1,27 @@
set -x -e
# reshape a RAID6 to RAID5 and then RAID0.
# then reshape back up to RAID5 and RAID6
mdadm -CR $md0 -l6 -n5 $dev0 $dev1 $dev2 $dev3 $dev4
check wait; sleep 1
check raid6
testdev $md0 3 19456 512
mdadm -G $md0 -l5
check wait; sleep 1
check raid5
testdev $md0 3 19456 512
mdadm -G $md0 -l0
check wait; sleep 1
check raid0
testdev $md0 3 19456 512
mdadm -G $md0 -l5 --add $dev3 $dev4
check wait; sleep 1
check raid5
check algorithm 2
testdev $md0 3 19456 512
mdadm -G $md0 -l 6
check wait; sleep 1
check raid6
check algorithm 2
testdev $md0 3 19456 512

12
tests/06name Normal file

@ -0,0 +1,12 @@
set -x
# create an array with a name
mdadm -CR $md0 -l0 -n2 --metadata=1 --name="Fred" $dev0 $dev1
mdadm -E $dev0 | grep 'Name : [^:]*:Fred ' > /dev/null || exit 1
mdadm -D $md0 | grep 'Name : [^:]*:Fred ' > /dev/null || exit 1
mdadm -S $md0
mdadm -A $md0 --name="Fred" $devlist
#mdadm -Db $md0
mdadm -S $md0

11
tests/06sysfs Normal file

@ -0,0 +1,11 @@
exit 0
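# note: the unconditional exit 0 above disables this test, so the commands
# below never run.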
mdadm -CR $md0 -l1 -n3 $dev1 $dev2 $dev3
ls -Rl /sys/block/md0
cat /sys/block/md0/md/level
cat /sys/block/md0/md/raid_disks
mdadm -S $md0
exit 1

13
tests/06wrmostly Normal file

@ -0,0 +1,13 @@
# create a raid1 array with a wrmostly device
mdadm -CR $md0 -l1 -n3 $dev0 $dev1 --write-mostly $dev2
testdev $md0 1 $mdsize1a 64
# unfortunately, we cannot measure if any read requests are going to $dev2
mdadm -S $md0
mdadm -CR $md0 -l1 -n3 --write-behind --bitmap=internal --bitmap-chunk=4 $dev0 $dev1 --write-mostly $dev2
testdev $md0 1 $mdsize1a 64
mdadm -S $md0

24
tests/07autoassemble Normal file

@ -0,0 +1,24 @@
# create two raid1s, build a raid0 on top, then
# tear it down and get auto-assemble to rebuild it.
mdadm -CR $md1 -l1 -n2 $dev0 $dev1 --homehost=testing
mdadm -CR $md2 -l1 -n2 $dev2 $dev3 --homehost=testing
mdadm -CR $md0 -l0 -n2 $md1 $md2 --homehost=testing
mdadm -Ss
mdadm -As -c /dev/null --homehost=testing -vvv
testdev $md1 1 $mdsize1a 64
testdev $md2 1 $mdsize1a 64
testdev $md0 2 $mdsize11a 512
mdadm -Ss
mdadm --zero-superblock $dev0 $dev1 $dev2 $dev3
## Now the raid0 uses one stacked and one not
mdadm -CR $md1 -l1 -n2 $dev0 $dev1 --homehost=testing
mdadm -CR $md0 -l0 -n2 $md1 $dev2 --homehost=testing
mdadm -Ss
mdadm -As -c /dev/null --homehost=testing -vvv
testdev $md1 1 $mdsize1a 64
testdev $md0 1 $[mdsize1a+mdsize11a] 512
mdadm -Ss

34
tests/07autodetect Normal file

@ -0,0 +1,34 @@
#
# Test in-kernel autodetect.
# Create a partitionable array on each of two devices,
# put a partition on each, create an array, and see if we can
# use autodetect to restart the array.
if lsmod | grep md_mod > /dev/null 2>&1
then
echo md is a module - cannot test autodetect
exit 0
fi
mdadm -CR -e 0 $mdp0 -l0 -f -n1 $dev0
mdadm -CR -e 0 $mdp1 -l0 -f -n1 $dev1
udevadm settle
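# ',,FD' asks sfdisk for a single whole-device partition of type FD
# (Linux raid autodetect), the type the kernel's autodetect scans for.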
sfdisk $mdp0 >&2 << END
,,FD
END
sfdisk $mdp1 >&2 << END
,,FD
END
udevadm settle
mdadm -CR -e 0 $md0 -l1 -n2 ${mdp0}p1 ${mdp1}p1
check resync
check raid1
check wait
mdadm -S $md0
mdadm --auto-detect
check raid1
mdadm -Ss
exit 0

61
tests/07changelevelintr Normal file

@ -0,0 +1,61 @@
#
# test that we can stop and restart a level change.
# just test a few in-place changes, and a few
# size-reducing changes.
checkgeo() {
# check the geometry of an array
# level raid_disks chunk_size layout
dev=$1
shift
sleep 0.5
check wait
sleep 1
for attr in level raid_disks chunk_size layout
do
if [ $# -gt 0 ] ; then
val=$1
shift
if [ " `cat /sys/block/$dev/md/$attr`" != " $val" ]
then echo "$attr doesn't match for $dev"
exit 1
fi
fi
done
}
restart() {
sleep 0.5
check reshape
mdadm -S $md0
mdadm -A $md0 $devs --backup-file=$bu
sleep 0.5
check reshape
}
bu=/tmp/md-backup
rm -f $bu
devs="$dev0 $dev1 $dev2 $dev3 $dev4"
mdadm -CR $md0 -l5 -n5 -c 256 $devs
checkgeo md0 raid5 5 $[256*1024] 2
mdadm -G $md0 -c 128 --backup-file=$bu
restart
checkgeo md0 raid5 5 $[128*1024] 2
mdadm -G $md0 --layout rs --backup-file=$bu
restart
checkgeo md0 raid5 5 $[128*1024] 3
mdadm -G $md0 --array-size 58368
mdadm -G $md0 --raid-disks 4 -c 64 --backup-file=$bu
restart
checkgeo md0 raid5 4 $[64*1024] 3
devs="$dev0 $dev1 $dev2 $dev3"
mdadm -G $md0 --array-size 19456
mdadm -G $md0 -n 2 -c 256 --backup-file=$bu
restart
checkgeo md0 raid5 2 $[256*1024] 3

114
tests/07changelevels Normal file
View file

@ -0,0 +1,114 @@
# Test changing of level, chunksize etc.
# Create a RAID1, convert to RAID5, add a disk, add another disk
# convert to RAID6, back to RAID5 and ultimately to RAID1
testK=$[64*3*6]
dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$testK
export MDADM_GROW_VERIFY=1
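# MDADM_GROW_VERIFY is assumed to make mdadm verify the data it backs up
# while restriping, so any kernel/restripe disagreement shows up as an error.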
dotest() {
sleep 2
check wait
testdev $md0 $1 19968 64 nd
blockdev --flushbufs $md0
cmp -s -n $[testK*1024] $md0 /tmp/RandFile || { echo cmp failed; exit 2; }
# write something new - shift chars 4 space
tr ' -~' '$-~ -#' < /tmp/RandFile > /tmp/RandFile2
mv /tmp/RandFile2 /tmp/RandFile
dd if=/tmp/RandFile of=$md0
}
checkgeo() {
# check the geometry of an array
# level raid_disks chunk_size layout
dev=$1
shift
sleep 0.5
check wait
sleep 1
for attr in level raid_disks chunk_size layout
do
if [ $# -gt 0 ] ; then
val=$1
shift
if [ " `cat /sys/block/$dev/md/$attr`" != " $val" ]
then echo "$attr doesn't match for $dev"
exit 1
fi
fi
done
}
bu=/tmp/md-test-backup
rm -f $bu
mdadm -CR $md0 -l1 -n2 -x1 $dev0 $dev1 $dev2 -z 19968
testdev $md0 1 $mdsize1a 64
dd if=/tmp/RandFile of=$md0
dotest 1
mdadm --grow $md0 -l5 -n3 --chunk 64
dotest 2
mdadm $md0 --add $dev3 $dev4
mdadm --grow $md0 -n4 --chunk 32
dotest 3
mdadm -G $md0 -l6 --backup-file $bu
dotest 3
mdadm -G /dev/md0 --array-size 39936
mdadm -G $md0 -n4 --backup-file $bu
checkgeo md0 raid6 4 $[32*1024]
dotest 2
mdadm -G $md0 -l5 --backup-file $bu
checkgeo md0 raid5 3 $[32*1024]
dotest 2
mdadm -G /dev/md0 --array-size 19968
mdadm -G $md0 -n2 --backup-file $bu
checkgeo md0 raid5 2 $[32*1024]
dotest 1
mdadm -G --level=1 $md0
dotest 1
# now repeat that last few steps only with a degraded array.
mdadm -S $md0
mdadm -CR $md0 -l6 -n5 $dev0 $dev1 $dev2 $dev3 $dev4
dd if=/tmp/RandFile of=$md0
dotest 3
mdadm $md0 --fail $dev0
mdadm -G /dev/md0 --array-size 37888
mdadm -G $md0 -n4 --backup-file $bu
dotest 2
checkgeo md0 raid6 4 $[512*1024]
mdadm $md0 --fail $dev4
mdadm $md0 --fail $dev3
# now double-degraded.
# switch layout to a DDF layout and back to make sure that works.
mdadm -G /dev/md0 --layout=ddf-N-continue --backup-file $bu
checkgeo md0 raid6 4 $[512*1024] 10
dotest 2
mdadm -G /dev/md0 --layout=ra --backup-file $bu
checkgeo md0 raid6 4 $[512*1024] 1
dotest 2
mdadm -G $md0 -l5 --backup-file $bu
dotest 2
mdadm -G /dev/md0 --array-size 18944
mdadm -G $md0 -n2 --backup-file $bu
dotest 1
checkgeo md0 raid5 2 $[512*1024]
mdadm $md0 --fail $dev2
mdadm -G --level=1 $md0
dotest 1
checkgeo md0 raid1 2

91
tests/07layouts Normal file

@ -0,0 +1,91 @@
# check that kernel and restripe interpret all the different layouts
# the same
# This involves changing the layout to each different possibility
# while MDADM_GROW_VERIFY is set.
testK=$[64*3*6]
dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$testK
export MDADM_GROW_VERIFY=1
dotest() {
sleep 0.5
check wait
testdev $md0 $1 $mdsize1 512 nd
blockdev --flushbufs $md0
cmp -s -n $[testK*1024] $md0 /tmp/RandFile || { echo cmp failed; exit 2; }
# write something new - shift chars 4 space
tr ' -~' '$-~ -#' < /tmp/RandFile > /tmp/RandFile2
mv /tmp/RandFile2 /tmp/RandFile
dd if=/tmp/RandFile of=$md0
}
checkgeo() {
# check the geometry of an array
# level raid_disks chunk_size layout
dev=$1
shift
sleep 0.5
check wait
for attr in level raid_disks chunk_size layout
do
if [ $# -gt 0 ] ; then
val=$1
shift
if [ " `sed 's/ .*//' /sys/block/$dev/md/$attr`" != " $val" ]
then echo "$attr doesn't match for $dev"
exit 1
fi
fi
done
}
bu=/tmp/md-test-backup
rm -f $bu
# first a degraded 5 device raid5
mdadm -CR $md0 -l5 -n5 $dev0 $dev1 missing $dev2 $dev3
dd if=/tmp/RandFile of=$md0
dotest 4
l5[0]=la
l5[1]=ra
l5[2]=ls
l5[3]=rs
l5[4]=parity-first
l5[5]=parity-last
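# Editorial note: the indices above are assumed to match the kernel's RAID5
# ALGORITHM_* numbering (left-asymmetric=0 ... parity-last=5); checkgeo below
# compares the numeric index against the sysfs layout attribute.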
for layout in 0 1 2 3 4 5 0
do
mdadm -G $md0 --layout=${l5[$layout]} --backup-file $bu
checkgeo md0 raid5 5 $[512*1024] $layout
dotest 4
done
mdadm -S $md0
# now a doubly degraded raid6
mdadm -CR $md0 -l6 -n5 $dev0 missing $dev2 missing $dev4
dd if=/tmp/RandFile of=$md0
dotest 3
l6[0]=la
l6[1]=ra
l6[2]=ls
l6[3]=rs
l6[4]=parity-first
l6[5]=parity-last
l6[8]=ddf-zero-restart
l6[9]=ddf-N-restart
l6[10]=ddf-N-continue
l6[16]=left-asymmetric-6
l6[17]=right-asymmetric-6
l6[18]=left-symmetric-6
l6[19]=right-symmetric-6
l6[20]=parity-first-6
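# Editorial note: the sparse indices are assumed to follow the kernel's RAID6
# layout numbering, with 8-10 being the DDF variants and 16-20 the
# RAID5-compatible "-6" layouts.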
for layout in 0 1 2 3 4 5 8 9 10 16 17 18 19 20 0
do
mdadm -G $md0 --layout=${l6[$layout]} --backup-file $bu
checkgeo md0 raid6 5 $[512*1024] $layout
dotest 3
done

41
tests/07reshape5intr Normal file
View file

@ -0,0 +1,41 @@
#
# test interrupting and restarting raid5 reshape.
set -x
devs="$dev1"
st=UU
for disks in 2 3 4 5
do
eval devs=\"$devs \$dev$disks\"
st=U$st
for d in $devs
do dd if=/dev/urandom of=$d bs=1024 || true
done
case $disks in
2 | 3) chunk=1024;;
4 ) chunk=512;;
5 ) chunk=256;;
esac
mdadm -CR $md0 -amd -l5 -c $chunk -n$disks --assume-clean $devs
mdadm $md0 --add $dev6
echo 20 > /proc/sys/dev/raid/speed_limit_min
echo 20 > /proc/sys/dev/raid/speed_limit_max
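# Editorial note: capping the resync speed at ~20KB/s keeps the reshape in
# flight, so the stop/assemble below exercises the restart path.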
mdadm --grow $md0 -n $[disks+1]
check reshape
check state $st
mdadm --stop $md0
mdadm --assemble $md0 $devs $dev6
check reshape
echo 1000 > /proc/sys/dev/raid/speed_limit_min
echo 2000 > /proc/sys/dev/raid/speed_limit_max
check wait
while ! echo check > /sys/block/md0/md/sync_action; do sleep 0.1; done
check wait
mm=`cat /sys/block/md0/md/mismatch_cnt`
if [ $mm -gt 0 ]
then echo >&2 "ERROR mismatch_cnt non-zero : $mm" ; exit 1
fi
mdadm -S $md0
done

52
tests/07revert-grow Normal file
View file

@ -0,0 +1,52 @@
set -e -x
# revert a reshape that is increasing the number of devices,
# raid5, raid6, and raid10
# metadata 0.90 cannot handle RAID10 growth
# metadata 1.0 doesn't get a default headspace, so don't try it either.
for metadata in 0.90 1.1 1.2
do
# RAID5
mdadm -CR --assume-clean $md0 -l5 -n4 -x1 $devlist4 --metadata=$metadata
check raid5
testdev $md0 3 $mdsize1 512
mdadm -G $md0 -n 5
sleep 3
mdadm -S $md0
mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=/tmp/md-backup
check wait
check raid5
testdev $md0 3 $mdsize1 512
mdadm -S $md0
# RAID6
mdadm -CR --assume-clean $md0 -l6 -n4 -x1 $devlist4 --metadata=$metadata
check raid6
testdev $md0 2 $mdsize1 512
mdadm -G $md0 -n 5
sleep 3
mdadm -S $md0
mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=/tmp/md-backup
check wait
check raid6
testdev $md0 2 $mdsize1 512
mdadm -S $md0
if [ $metadata = 0.90 ]; then continue; fi
# RAID10
mdadm -CR --assume-clean $md0 -l10 -n4 -x1 $devlist4 --metadata=$metadata
check raid10
testdev $md0 2 $mdsize1 512
mdadm -G $md0 -n 5
sleep 3
mdadm -S $md0
strace -o /tmp/str ./mdadm -A $md0 --update=revert-reshape $devlist4
check wait
check raid10
testdev $md0 2 $mdsize1 512
mdadm -S $md0
done

44
tests/07revert-inplace Normal file
View file

@ -0,0 +1,44 @@
set -e -x
# revert a reshape that is not changing the number of data devices,
# raid5, raid6, and raid10
# RAID5 -> RAID6
mdadm -CR --assume-clean $md0 -l5 -n4 -x1 $devlist4
check raid5
testdev $md0 3 $mdsize1 512
mdadm -G $md0 -l 6
sleep 2
mdadm -S $md0
mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=/tmp/md-backup
check wait
check raid6
check algorithm 18
testdev $md0 3 $mdsize1 512
mdadm -S $md0
# RAID6 -> RAID5
mdadm -CR --assume-clean $md0 -l6 -n5 $devlist4
check raid6
testdev $md0 3 $mdsize1 512
mdadm -G $md0 -l 5
sleep 2
mdadm -S $md0
mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=/tmp/md-backup
check wait
check raid6
testdev $md0 3 $mdsize1 512
mdadm -S $md0
# RAID10 - decrease chunk size
mdadm -CR --assume-clean $md0 -l10 -n6 -c 64 $devlist5
check raid10
testdev $md0 3 $mdsize1 64
mdadm -G $md0 -c 32
sleep 2
mdadm -S $md0
strace -o /tmp/str ./mdadm -A $md0 --update=revert-reshape $devlist5
check wait
check raid10
testdev $md0 3 $mdsize1 64
mdadm -S $md0

56
tests/07revert-shrink Normal file
View file

@ -0,0 +1,56 @@
set -e -x
# revert a reshape that is decreasing the number of devices,
# raid5, raid6, and raid10
bu=$targetdir/md-backup
rm -f $bu
# RAID5
mdadm -CR --assume-clean $md0 -l5 -n5 $devlist4
check raid5
testdev $md0 4 $mdsize1 512
mdadm --grow $md0 --array-size 56832
testdev $md0 3 $mdsize1 512
mdadm -G $md0 -n 4 --backup=$bu
sleep 3
mdadm -S $md0
mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=$bu
check wait
check raid5
fsck -f -n $md0
testdev $md0 4 $mdsize1 512
mdadm -S $md0
#FIXME
rm -f $bu
# RAID6
mdadm -CR --assume-clean $md0 -l6 -n5 $devlist4
check raid6
testdev $md0 3 $mdsize1 512
mdadm --grow $md0 --array-size 37888
testdev $md0 2 $mdsize1 512
mdadm -G $md0 -n 4 --backup=$bu
sleep 2
mdadm -S $md0
mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=$bu
check wait
check raid6
fsck -f -n $md0
testdev $md0 3 $mdsize1 512
mdadm -S $md0
# RAID10
mdadm -CR --assume-clean $md0 -l10 -n6 $devlist5
check raid10
testdev $md0 3 $mdsize1 512
mdadm --grow $md0 --array-size 36864
testdev $md0 2 $mdsize1 512
mdadm -G $md0 -n 4
sleep 3
mdadm -S $md0
mdadm -A $md0 --update=revert-reshape $devlist5
check wait
check raid10
fsck -f -n $md0
testdev $md0 3 $mdsize1 512
mdadm -S $md0

45
tests/07testreshape5 Normal file
View file

@ -0,0 +1,45 @@
#
# test the reshape code by using test_reshape and the
# kernel md code to move data into and out of variously
# shaped md arrays.
set -x
layouts=(la ra ls rs)
for level in 5 6
do
for chunk in 4 8 16 32 64 128
do
devs="$dev1"
for disks in 2 3 4 5 6
do
eval devs=\"$devs \$dev$disks\"
if [ " $level $disks" = " 6 3" -o " $level $disks" = " 6 2" ]
then continue
fi
for nlayout in 0 1 2 3
do
layout=${layouts[$nlayout]}
size=$[chunk*(disks-(level-4))*disks]
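# Editorial worked example: level=5, disks=3, chunk=4 gives
# size = 4*(3-1)*3 = 24 (KiB), i.e. three full stripes of data.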
# test restore: make a raid5 from a file, then do a compare
dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$size
$dir/test_stripe restore /tmp/RandFile $disks $[chunk*1024] $level $nlayout 0 $[size*1024] $devs
mdadm -CR -e 1.0 $md0 -amd -l$level -n$disks --assume-clean -c $chunk -p $layout $devs
cmp -s -n $[size*1024] $md0 /tmp/RandFile || { echo cmp failed ; exit 2; }
# FIXME check parity
# test save
dd if=/dev/urandom of=$md0 bs=1024 count=$size
blockdev --flushbufs $md0 $devs; sync
> /tmp/NewRand
$dir/test_stripe save /tmp/NewRand $disks $[chunk*1024] $level $nlayout 0 $[size*1024] $devs
cmp -s -n $[size*1024] $md0 /tmp/NewRand || { echo cmp failed ; exit 2; }
mdadm -S $md0
udevadm settle
done
done
done
done
exit 0

73
tests/09imsm-assemble Normal file
View file

@ -0,0 +1,73 @@
# validate the prodigal member disk scenario i.e. a former container
# member is returned after having been rebuilt on another system
imsm_check_hold() {
if mdadm --remove $1 $2; then
echo "$2 removal from $1 should have been blocked" >&2
cat /proc/mdstat >&2
mdadm -E $2
exit 1
fi
}
imsm_check_removal() {
if ! mdadm --remove $1 $2 ; then
echo "$2 removal from $1 should have succeeded" >&2
cat /proc/mdstat >&2
mdadm -E $2
exit 1
fi
}
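# Usage sketch (editorial): a disk still backing array data is expected to be
# held, a removed/foreign one released, e.g.
#   imsm_check_hold $container $dev2
#   imsm_check_removal $container $dev1
# (the device choices here are illustrative only).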
export IMSM_DEVNAME_AS_SERIAL=1
export IMSM_TEST_OROM=1
export IMSM_NO_PLATFORM=1
container=/dev/md/container
member=/dev/md/vol0
num_disks=4
size=$((10*1024))
mdadm -CR $container -e imsm -n $num_disks $dev0 $dev1 $dev2 $dev3
mdadm -CR $member $dev0 $dev2 -n 2 -l 1 -z $size
mdadm --wait $member || true
mdadm -Ss
# make dev0 and dev1 a new rebuild family
mdadm -A $container $dev0 $dev1
mdadm -IR $container
mdadm --wait ${member}_0 || true
mdadm -Ss
# make dev2 and dev3 a new rebuild family
mdadm -A $container $dev2 $dev3
mdadm -IR $container
mdadm --wait ${member}_0 || true
mdadm -Ss
# reassemble and make sure one of the families falls out
mdadm -A $container $dev0 $dev1 $dev2 $dev3
mdadm -IR $container
testdev ${member}_0 1 $size 64
if mdadm --remove $container $dev0 ; then
# the dev[23] family won
imsm_check_removal $container $dev1
imsm_check_hold $container $dev2
imsm_check_hold $container $dev3
else
# the dev[01] family won
imsm_check_hold $container $dev1
imsm_check_removal $container $dev2
imsm_check_removal $container $dev3
fi
mdadm -Ss
# reassemble with a new id for the dev[23] family
mdadm -A $container $dev0 $dev1
mdadm -IR $container
mdadm -A ${container}2 $dev2 $dev3 --update=uuid
mdadm -IR ${container}2
testdev ${member}_0 1 $size 64
testdev ${member}_1 1 $size 64

View file

@ -0,0 +1,78 @@
# sanity check array creation
imsm_check_hold() {
if mdadm --remove $1 $2; then
echo "$2 removal from $1 should have been blocked" >&2
cat /proc/mdstat >&2
mdadm -E $2
exit 1
fi
}
imsm_check_removal() {
if ! mdadm --remove $1 $2 ; then
echo "$2 removal from $1 should have succeeded" >&2
cat /proc/mdstat >&2
mdadm -E $2
exit 1
fi
}
. tests/env-imsm-template
# IMSM rounds to multiples of one mebibyte - 1024K
DEV_ROUND_K=1024
num_disks=2
mdadm -CR $container -e imsm -n $num_disks $dev0 $dev1
imsm_check container $num_disks
# RAID0 + RAID1
size=9000
level=0
chunk=64
offset=0
mdadm -CR $member0 $dev0 $dev1 -n $num_disks -l $level -z $size -c $chunk
imsm_check member $member0 $num_disks $level $size $((size*2)) $offset $chunk
testdev $member0 $num_disks $size $chunk
offset=$(((size & ~(1024 - 1)) + 4096))
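# Editorial worked example: size=9000 (KiB) rounds down to 8192, and the
# extra 4096 is assumed to be reserved space, giving offset=12288 for the
# second member.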
size=4000
level=1
chunk=0
mdadm -CR $member1 $dev0 $dev1 -n $num_disks -l $level -z $size
imsm_check member $member1 $num_disks $level $size $size $offset $chunk
testdev $member1 1 $size 64
check wait
mdadm -Ss
# RAID10 + RAID5
num_disks=4
mdadm -CR $container -e imsm -n $num_disks $dev0 $dev1 $dev2 $dev3
imsm_check container $num_disks
size=9000
level=10
chunk=64
offset=0
mdadm -CR $member0 $dev0 $dev1 $dev2 $dev3 -n $num_disks -l $level -z $size -c $chunk
imsm_check member $member0 $num_disks $level $size $((size*2)) $offset $chunk
testdev $member0 $((num_disks-2)) $size $chunk
offset=$(((size & ~(1024 - 1)) + 4096))
size=4000
level=5
mdadm -CR $member1 $dev0 $dev1 $dev2 $dev3 -n $num_disks -l $level -z $size -c $chunk
imsm_check member $member1 $num_disks $level $size $((size*3)) $offset $chunk
testdev $member1 $((num_disks-1)) $size $chunk
check wait
# FAIL / REBUILD
imsm_check_hold $container $dev0
mdadm --fail $member0 $dev0
mdadm --wait-clean --scan || true
imsm_check_removal $container $dev0
mdadm --add $container $dev4
check wait
imsm_check_hold $container $dev4

28
tests/09imsm-overlap Normal file
View file

@ -0,0 +1,28 @@
. tests/env-imsm-template
# create raid arrays with varying degrees of overlap
mdadm -CR $container -e imsm -n 6 $dev0 $dev1 $dev2 $dev3 $dev4 $dev5
imsm_check container 6
size=1024
level=1
num_disks=2
mdadm -CR $member0 $dev0 $dev1 -n $num_disks -l $level -z $size
mdadm -CR $member1 $dev1 $dev2 -n $num_disks -l $level -z $size
mdadm -CR $member2 $dev2 $dev3 -n $num_disks -l $level -z $size
mdadm -CR $member3 $dev3 $dev4 -n $num_disks -l $level -z $size
mdadm -CR $member4 $dev4 $dev5 -n $num_disks -l $level -z $size
udevadm settle
offset=0
imsm_check member $member0 $num_disks $level $size 1024 $offset
offset=$((offset+size+4096))
imsm_check member $member1 $num_disks $level $size 1024 $offset
offset=$((offset+size+4096))
imsm_check member $member2 $num_disks $level $size 1024 $offset
offset=$((offset+size+4096))
imsm_check member $member3 $num_disks $level $size 1024 $offset
offset=$((offset+size+4096))
imsm_check member $member4 $num_disks $level $size 1024 $offset

View file

@ -0,0 +1,61 @@
# An array is assembled incompletely.
# The missing disks get marked as missing and are not allowed back in
. tests/env-ddf-template
tmp=$(mktemp /tmp/mdtest-XXXXXX)
rm -f $tmp /var/tmp/mdmon.log
ret=0
mdadm -CR $container -e ddf -n 4 $dev8 $dev9 $dev10 $dev11
ddf_check container 4
mdadm -CR $member1 -n 4 -l 10 $dev8 $dev10 $dev9 $dev11 -z 10000
mdadm -CR $member0 -n 2 -l 1 $dev8 $dev9 -z 10000
mdadm --wait $member0 || true
mdadm --wait $member1 || true
mdadm -Ss
sleep 1
# Add all devices except those for $member0
mdadm -I $dev10
mdadm -I $dev11
# Start runnable members
mdadm -IRs || true
mdadm -Ss
#[ -f /var/tmp/mdmon.log ] && cat /var/tmp/mdmon.log
# Now reassemble
# This should work because BVDs weren't written to
for d in $dev8 $dev9 $dev10 $dev11; do
mdadm -I $d
done
mdadm -Ss
# Expect consistent state
for d in $dev10 $dev11; do
mdadm -E $d>$tmp
egrep 'state\[0\] : Degraded, Consistent' $tmp || {
ret=1
echo ERROR: $member0 has unexpected state on $d
}
egrep 'state\[1\] : Optimal, Consistent' $tmp || {
ret=1
echo ERROR: $member1 has unexpected state on $d
}
if [ x$(egrep -c 'active/Online$' $tmp) != x2 ]; then
ret=1
echo ERROR: unexpected number of online disks on $d
fi
done
if [ $ret -ne 0 ]; then
mdadm -E $dev10
mdadm -E $dev8
fi
rm -f $tmp /var/tmp/mdmon.log
[ $ret -eq 0 ]

89
tests/10ddf-create Normal file
View file

@ -0,0 +1,89 @@
#
# Test basic DDF functionality.
#
# Create a container with 5 drives
# create a small raid0 across them all,
# then a small raid10 using 4 drives, then a 2-disk raid1
# and a 3-disk raid5 using the remaining space
#
# add some data, tear down the array, reassemble
# and make sure it is still there.
set -e
. tests/env-ddf-template
sda=$(get_rootdev) || exit 1
mdadm -CR /dev/md/ddf0 -e ddf -n 5 $dev8 $dev9 $dev10 $dev11 $dev12
mdadm -CR r5 -l5 -n5 /dev/md/ddf0 -z 5000
if mdadm -CR r5 -l1 -n2 /dev/md/ddf0 -z 5000
then echo >&2 create with same name should fail ; exit 1
fi
mdadm -CR r10 -l10 -n4 -pn2 /dev/md/ddf0 -z 5000
mdadm -CR r1 -l1 -n2 /dev/md/ddf0
mdadm -CR r0 -l0 -n3 /dev/md/ddf0
testdev /dev/md/r5 4 5000 512
testdev /dev/md/r10 2 5000 512
# r0/r10 will use 4608 due to chunk size, so that leaves 23552 for the rest
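# (editorial: -z 5000 rounded down to a multiple of the 512k chunk is
# 9*512 = 4608k per device, hence the 4608 above)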
testdev /dev/md/r1 1 23552 64
testdev /dev/md/r0 3 23552 512
dd if=$sda of=/dev/md/r0 || true
dd if=$sda of=/dev/md/r10 || true
dd if=$sda of=/dev/md/r1 || true
dd if=$sda of=/dev/md/r5 || true
s0=`sha1sum /dev/md/r0`
s10=`sha1sum /dev/md/r10`
s1=`sha1sum /dev/md/r1`
s5=`sha1sum /dev/md/r5`
mdadm -Ss
mdadm -A /dev/md/ddf0 $dev8 $dev9 $dev10 $dev11 $dev12
mdadm -I /dev/md/ddf0
udevadm settle
s0a=`sha1sum /dev/md/r0`
s10a=`sha1sum /dev/md/r10`
s1a=`sha1sum /dev/md/r1`
s5a=`sha1sum /dev/md/r5`
if [ "$s0" != "$s0a" ]; then
echo r0 did not match ; exit 1;
fi
if [ "$s10" != "$s10a" ]; then
echo r10 did not match ; exit 1;
fi
if [ "$s1" != "$s1a" ]; then
echo r1 did not match ; exit 1;
fi
if [ "$s5" != "$s5a" ]; then
echo r5 did not match ; exit 1;
fi
# failure status just means it has completed already, so ignore it.
mdadm --wait /dev/md/r1 || true
mdadm --wait /dev/md/r10 || true
mdadm --wait /dev/md/r5 || true
mdadm -Dbs > /var/tmp/mdadm.conf
mdadm -Ss
# Now try to assemble using mdadm.conf
mdadm -Asc /var/tmp/mdadm.conf
check nosync # This failed once. The raid5 was resyncing.
udevadm settle
mdadm -Dbs | sort > /tmp/mdadm.conf
sort /var/tmp/mdadm.conf | diff /tmp/mdadm.conf -
mdadm -Ss
# and now assemble fully incrementally.
for i in $dev8 $dev9 $dev10 $dev11 $dev12
do
mdadm -I $i -c /var/tmp/mdadm.conf
done
check nosync
udevadm settle
mdadm -Dbs | sort > /tmp/mdadm.conf
sort /var/tmp/mdadm.conf | diff /tmp/mdadm.conf -
mdadm -Ss
rm /tmp/mdadm.conf /var/tmp/mdadm.conf

View file

@ -0,0 +1,77 @@
# sanity check array creation
ddf_check_hold() {
if mdadm --remove $1 $2; then
echo "$2 removal from $1 should have been blocked" >&2
cat /proc/mdstat >&2
mdadm -E $2
exit 1
fi
}
ddf_check_removal() {
if ! mdadm --remove $1 $2 ; then
echo "$2 removal from $1 should have succeeded" >&2
cat /proc/mdstat >&2
mdadm -E $2
exit 1
fi
}
. tests/env-ddf-template
num_disks=2
mdadm -CR $container -e ddf -n $num_disks $dev8 $dev9
ddf_check container $num_disks
# RAID0 + RAID1
size=9000
level=0
chunk=64
offset=0
layout=0
mdadm -CR $member0 $dev8 $dev9 -n $num_disks -l $level -z $size -c $chunk
ddf_check member $member0 $num_disks $level $size $((size*2)) $offset $chunk $layout
testdev $member0 $num_disks $size $chunk
offset=$(((size & ~(chunk - 1))))
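# Editorial worked example: size=9000 (KiB) with a 64K chunk rounds down to
# 8960, so the second member is assumed to start immediately after the first.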
size=4000
level=1
chunk=0
mdadm -CR $member1 $dev8 $dev9 -n $num_disks -l $level -z $size
ddf_check member $member1 $num_disks $level $size $size $offset $chunk $layout
testdev $member1 1 $size 1
check wait
mdadm -Ss
# RAID10 + RAID5
num_disks=4
mdadm -CR $container -e ddf -n $num_disks $dev8 $dev9 $dev10 $dev11
ddf_check container $num_disks
size=9000
level=10
chunk=64
offset=0
layout=2
mdadm -CR $member0 $dev8 $dev9 $dev10 $dev11 -n $num_disks -l $level -z $size -c $chunk
ddf_check member $member0 $num_disks $level $size $((size*2)) $offset $chunk $layout
testdev $member0 $((num_disks-2)) $size $chunk
offset=$(((size & ~(chunk - 1))))
size=4000
level=5
mdadm -CR $member1 $dev8 $dev9 $dev10 $dev11 -n $num_disks -l $level -z $size -c $chunk
ddf_check member $member1 $num_disks $level $size $((size*3)) $offset $chunk $layout
testdev $member1 $((num_disks-1)) $size $chunk
check wait
# FAIL / REBUILD
ddf_check_hold $container $dev8
mdadm --fail $member0 $dev8
mdadm --wait-clean --scan || true
ddf_check_removal $container $dev8
mdadm --add $container $dev12
check wait
ddf_check_hold $container $dev12

View file

@ -0,0 +1,66 @@
# This test creates a RAID1, fails a disk, and immediately
# (simultaneously) creates a new array. This tests for a possible
# race where the meta data reflecting the disk failure may not
# be written when the 2nd array is created.
. tests/env-ddf-template
mdadm --zero-superblock $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
mdadm -CR $container -e ddf -l container -n 2 $dev11 $dev12
#$dir/mdadm -CR $member0 -l raid1 -n 2 $container -z 10000 >/tmp/mdmon.txt 2>&1
mdadm -CR $member0 -l raid1 -n 2 $container -z 10000
check wait
fail0=$dev11
mdadm --fail $member0 $fail0 &
# The test can succeed two ways:
# 1) mdadm -C member1 fails - in this case the meta data
# was already on disk when the create attempt was made
# 2) mdadm -C succeeds in the first place (meta data not on disk yet),
# but mdmon detects the problem and sets the disk faulty.
if mdadm -CR $member1 -l raid1 -n 2 $container; then
echo create should have failed / race condition?
check wait
set -- $(get_raiddisks $member0)
d0=$1
ret=0
if [ $1 = $fail0 -o $2 = $fail0 ]; then
ret=1
else
set -- $(get_raiddisks $member1)
if [ $1 = $fail0 -o $2 = $fail0 ]; then
ret=1
fi
fi
if [ $ret -eq 1 ]; then
echo ERROR: failed disk $fail0 is still a RAID member
echo $member0: $(get_raiddisks $member0)
echo $member1: $(get_raiddisks $member1)
fi
tmp=$(mktemp /tmp/mdtest-XXXXXX)
mdadm -E $d0 >$tmp
if [ x$(grep -c 'state\[[01]\] : Degraded' $tmp) != x2 ]; then
echo ERROR: non-degraded array found
mdadm -E $d0
ret=1
fi
if ! grep -q '^ *0 *[0-9a-f]\{8\} .*Offline, Failed' $tmp; then
echo ERROR: disk 0 not marked as failed in meta data
mdadm -E $d0
ret=1
fi
rm -f $tmp
else
ret=0
fi
[ -f /tmp/mdmon.txt ] && {
cat /tmp/mdmon.txt
rm -f /tmp/mdmon.txt
}
[ $ret -eq 0 ]

55
tests/10ddf-fail-readd Normal file
View file

@ -0,0 +1,55 @@
# Simple fail / re-add test
. tests/env-ddf-template
tmp=$(mktemp /tmp/mdtest-XXXXXX)
rm -f $tmp
mdadm --zero-superblock $dev8 $dev9
mdadm -CR $container -e ddf -l container -n 2 $dev8 $dev9
mdadm -CR $member0 -l raid1 -n 2 $container
#$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1
mke2fs -F $member0
check wait
set -- $(get_raiddisks $member0)
fail0=$1
mdadm $member0 --fail $fail0
sleep 1
mdadm $container --remove $fail0
set -- $(get_raiddisks $member0)
case $1 in MISSING) shift;; esac
good0=$1
# We re-add the disk now
mdadm $container --add $fail0
sleep 1
mdadm --wait $member0 || true
ret=0
set -- $(get_raiddisks $member0)
case $1:$2 in
$dev8:$dev9|$dev9:$dev8);;
*) echo ERROR: bad raid disks "$@"; ret=1;;
esac
mdadm -Ss
for x in $@; do
mdadm -E $x >$tmp
if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then
echo ERROR: member 0 should be optimal in meta data on $x
ret=1
fi
done
rm -f $tmp
if [ $ret -ne 0 ]; then
mdadm -E $dev8
mdadm -E $dev9
fi
[ $ret -eq 0 ]

View file

@ -0,0 +1,71 @@
# Simple fail / re-add test
. tests/env-ddf-template
tmp=$(mktemp /tmp/mdtest-XXXXXX)
rm -f $tmp
mdadm --zero-superblock $dev8 $dev9
mdadm -CR $container -e ddf -l container -n 2 $dev8 $dev9
mdadm -CR $member0 -l raid1 -n 2 $container
#$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1
check wait
set -- $(get_raiddisks $member0)
fail0=$1
mdadm $member0 --fail $fail0
sleep 1
set -- $(get_raiddisks $member0)
case $1 in MISSING) shift;; esac
good0=$1
# Check that the meta data now show one disk as failed
ret=0
for x in $@; do
mdadm -E $x >$tmp
if ! grep -q 'state\[0\] : Degraded, Consistent' $tmp; then
echo ERROR: member 0 should be degraded in meta data on $x
ret=1
fi
phys=$(grep $x $tmp)
case $x:$phys in
$fail0:*active/Offline,\ Failed);;
$good0:*active/Online);;
*) echo ERROR: wrong phys disk state for $x
ret=1
;;
esac
done
mdadm $container --remove $fail0
# We re-add the disk now
mdadm $container --add $fail0
sleep 1
mdadm --wait $member0 || true
set -- $(get_raiddisks $member0)
case $1:$2 in
$dev8:$dev9|$dev9:$dev8);;
*) echo ERROR: bad raid disks "$@"; ret=1;;
esac
mdadm -Ss
for x in $@; do
mdadm -E $x >$tmp
if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then
echo ERROR: member 0 should be optimal in meta data on $x
ret=1
fi
done
rm -f $tmp
if [ $ret -ne 0 ]; then
mdadm -E $dev8
mdadm -E $dev9
fi
[ $ret -eq 0 ]

86
tests/10ddf-fail-spare Normal file
View file

@ -0,0 +1,86 @@
# Test suggested by Albert Pauw: Create, fail one disk, have mdmon
# activate the spare,
# then run create again. It shouldn't use the failed disk for the Create.
. tests/env-ddf-template
tmp=$(mktemp /tmp/mdtest-XXXXXX)
rm -f $tmp
mdadm --zero-superblock $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
mdadm -CR $container -e ddf -l container -n 5 $dev8 $dev9 $dev10 $dev11 $dev12
mdadm -CR $member0 -l raid1 -n 2 $container
#$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1
check wait
set -- $(get_raiddisks $member0)
fail0=$1
mdadm --fail $member0 $fail0
# To make sure the spare is activated, we may have to sleep
# 2s has always been enough for me
sleep 2
check wait
# This test can succeed both ways - if spare was activated
# before new array was created, we see only member 0.
# otherwise, we see both, and member0 is degraded because the
# new array grabbed the spare
# which case occurs depends on the sleep time above.
ret=0
if mdadm -CR $member1 -l raid5 -n 3 $container; then
# Creation successful - must have been quicker than spare activation
check wait
set -- $(get_raiddisks $member1)
if [ $1 = $fail0 -o $2 = $fail0 -o $3 = $fail0 ]; then
echo ERROR: $member1 must not contain $fail0: $@
ret=1
fi
d1=$1
mdadm -E $d1 >$tmp
if ! grep -q 'state\[1\] : Optimal, Consistent' $tmp; then
echo ERROR: member 1 should be optimal in meta data
ret=1
fi
state0=Degraded
else
# Creation unsuccessful - spare was used for member 0
state0=Optimal
fi
# need to delay a little bit, sometimes the meta data aren't
# up-to-date yet
sleep 0.5
set -- $(get_raiddisks $member0)
if [ $1 = $fail0 -o $2 = $fail0 ]; then
echo ERROR: $member0 must not contain $fail0: $@
ret=1
fi
d0=$1
[ -f $tmp ] || mdadm -E $d0 >$tmp
if ! grep -q 'state\[0\] : '$state0', Consistent' $tmp; then
echo ERROR: member 0 should be $state0 in meta data
ret=1
fi
if ! grep -q 'Offline, Failed' $tmp; then
echo ERROR: Failed disk expected in meta data
ret=1
fi
if [ $ret -eq 1 ]; then
cat /proc/mdstat
mdadm -E $d0
mdadm -E $d1
mdadm -E $fail0
fi
[ -f /tmp/mdmon.txt ] && {
cat /tmp/mdmon.txt
rm -f /tmp/mdmon.txt
}
rm -f $tmp
[ $ret -eq 0 ]

View file

@ -0,0 +1,66 @@
# Simple fail / re-add test
. tests/env-ddf-template
tmp=$(mktemp /tmp/mdtest-XXXXXX)
rm -f $tmp
mdadm --zero-superblock $dev8 $dev9
mdadm -CR $container -e ddf -l container -n 2 $dev8 $dev9
mdadm -CR $member0 -l raid1 -n 2 $container
#$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1
# Write to the array
mke2fs -F $member0
check wait
set -- $(get_raiddisks $member0)
fail0=$1
mdadm $member0 --fail $fail0
sleep 1
mdadm $container --remove $fail0
set -- $(get_raiddisks $member0)
case $1 in MISSING) shift;; esac
good0=$1
mdadm -Ss
sleep 1
# Now simulate incremental assembly
mdadm -I $good0
mdadm -IRs || true
# Write to the array
mke2fs -F $member0
# We re-add the disk now
mdadm $container --add $fail0
sleep 1
mdadm --wait $member0 || true
ret=0
set -- $(get_raiddisks $member0)
case $1:$2 in
$dev8:$dev9|$dev9:$dev8);;
*) echo ERROR: bad raid disks "$@"; ret=1;;
esac
mdadm -Ss
for x in $@; do
mdadm -E $x >$tmp
if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then
echo ERROR: member 0 should be optimal in meta data on $x
ret=1
fi
done
rm -f $tmp
if [ $ret -ne 0 ]; then
mdadm -E $dev8
mdadm -E $dev9
fi
[ $ret -eq 0 ]

59
tests/10ddf-fail-twice Normal file
View file

@ -0,0 +1,59 @@
. tests/env-ddf-template
num_disks=5
mdadm -CR $container -e ddf -n $num_disks $dev8 $dev9 $dev10 $dev11 $dev12
ddf_check container $num_disks
mdadm -CR $member0 -n 2 -l 1 $container
mdadm -CR $member1 -n 3 -l 5 $container
mdadm --wait $member1 $member0 || mdadm --wait $member1 $member0 || true
set -- $(get_raiddisks $member0)
fail0=$1
mdadm $member0 --fail $fail0
set -- $(get_raiddisks $member1)
fail1=$1
mdadm $member1 --fail $fail1
mdadm $container --add $dev13
mdadm --wait $member1 $member0 || mdadm --wait $member1 $member0 || true
devs0="$(get_raiddisks $member0)"
devs1="$(get_raiddisks $member1)"
present=$(($(get_present $member0) + $(get_present $member1)))
[ $present -eq 4 ] || {
echo expected 4 present disks, got $present
echo devices for $member0: $devs0
echo devices for $member1: $devs1
exit 1
}
if echo "$devs0" | grep -q MISSING; then
good=1
bad=0
else
good=0
bad=1
fi
# find a good device
eval "set -- \$devs$good"
check=$1
tmp=$(mktemp /tmp/mdtest-XXXXXX)
mdadm -E $check >$tmp
{ grep -q 'state\['$bad'\] : Degraded, Consistent' $tmp &&
grep -q 'state\['$good'\] : Optimal, Consistent' $tmp; } || {
echo unexpected meta data state on $check
mdadm -E $check
rm -f $tmp
exit 1
}
rm -f $tmp
exit 0

View file

@ -0,0 +1,86 @@
# Simulate two disks failing shortly after each other
. tests/env-ddf-template
sda=$(get_rootdev) || exit 1
tmp=$(mktemp /tmp/mdtest-XXXXXX)
mdadm --zero-superblock $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
mdadm -CR $container -e ddf -l container -n 6 \
$dev8 $dev9 $dev10 $dev11 $dev12 $dev13
#fast_sync
mdadm -CR $member0 -l raid6 -n 4 $dev10 $dev11 $dev12 $dev13 -z 16384
#$dir/mdadm -CR $member0 -l raid6 -n 4 $dev10 $dev11 $dev12 $dev13 -z 16384 \
# >/tmp/mdmon.txt 2>&1
mdadm -CR $member1 -l raid10 -n 4 $dev10 $dev11 $dev12 $dev13 -z 16384
dd if=$sda of=$member0 bs=1M count=32
dd if=$sda of=$member1 bs=1M skip=16 count=16
check wait
sum0=$(sha1sum $member0)
sum1=$(sha1sum $member1)
mdadm --fail $member1 $dev11
sleep 1
mdadm --fail $member1 $dev12
# We will have 4 resync procedures, 2 spares for 2 arrays.
mdadm --wait $member1 $member0 || true
mdadm --wait $member1 $member0 || true
devs0="$(get_raiddisks $member0)"
devs1="$(get_raiddisks $member1)"
expected="$dev10
$dev13
$dev8
$dev9"
ret=0
if [ "$(echo "$devs0" | sort)" != "$expected" \
-o "$(echo "$devs1" | sort)" != "$expected" ]; then
echo ERROR: unexpected members
echo $member0: $devs0
echo $member1: $devs1
ret=1
fi
mdadm -E $dev10 >$tmp
if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then
echo ERROR: $member0 should be optimal in meta data
ret=1
fi
if ! grep -q 'state\[1\] : Optimal, Consistent' $tmp; then
echo ERROR: $member1 should be optimal in meta data
ret=1
fi
if [ x"$(grep -c active/Online $tmp)" != x4 ]; then
echo ERROR: expected 4 online disks
ret=1
fi
if [ x"$(grep -c "Offline, Failed" $tmp)" != x2 ]; then
echo ERROR: expected 2 failed disks
ret=1
fi
sum0a=$(sha1sum $member0)
sum1a=$(sha1sum $member1)
if [ "$sum0" != "$sum0a" -o "$sum1" != "$sum1a" ]; then
echo ERROR: checksum mismatch
ret=1
fi
if [ $ret -eq 1 ]; then
cat /proc/mdstat
cat $tmp
fi
[ -f /tmp/mdmon.txt ] && {
cat /tmp/mdmon.txt
rm -f /tmp/mdmon.txt
}
rm -f $tmp
[ $ret -eq 0 ]

82
tests/10ddf-geometry Normal file
View file

@ -0,0 +1,82 @@
#
# Test various RAID geometries, creation and deletion of subarrays
#
assert_fail() {
if mdadm "$@"; then
echo mdadm "$@" must fail
return 1
else
return 0
fi
}
assert_kill() {
local dev=$1 n=$2
mdadm -S $dev
mdadm --kill-subarray=$n /dev/md/ddf0
if mdadm -Dbs | grep -q $dev; then
echo >&2 $dev should be deleted
return 1
fi
return 0
}
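# Usage sketch (editorial): assert_fail forwards its arguments to mdadm and
# treats success as a test failure, e.g.
#   assert_fail -CR bad -l1 -n7 /dev/md/ddf0   # hypothetical geometry
# assert_kill stops a subarray and verifies it is gone from the container.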
set -e
mdadm -CR /dev/md/ddf0 -e ddf -n 6 $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
# RAID1 geometries
# Use different sizes to make offset calculation harder
mdadm -CR l1s -l1 -n2 /dev/md/ddf0 -z 8000
mdadm -CR l1m -l1 -n3 $dev8 $dev9 $dev10 -z 10000
assert_fail -CR badl1 -l1 -n4 /dev/md/ddf0
# RAID10 geometries
mdadm -CR l10_0 -l10 -n3 /dev/md/ddf0 -z 1000
mdadm -CR l10_1 -l10 -n5 /dev/md/ddf0 -z 1000
assert_fail mdadm -CR badl10 -l10 -n4 -pn3 /dev/md/ddf0
mdadm -CR l10_2 -l10 -n6 -pn2 /dev/md/ddf0 -z 4000
mdadm -CR l10_3 -l10 -n6 -pn3 /dev/md/ddf0 -z 4000
assert_fail -CR l10_2 -l10 -n6 -pn2 /dev/md/ddf0 -z 5000
assert_kill /dev/md/l10_2 4
# gone now, must be able to create it again
mdadm -CR l10_2 -l10 -n6 -pn2 /dev/md/ddf0 -z 5000
# Now stop and reassemble
mdadm -Ss
mdadm -A /dev/md/ddf0 $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
# Same as above, on inactive container
assert_fail -CR l10_3 -l10 -n6 -pn2 /dev/md/ddf0 -z 5000
# Kill subarray without having started anything (no mdmon)
mdadm --kill-subarray=5 /dev/md/ddf0
mdadm -I /dev/md/ddf0
mdadm -CR l10_3 -l10 -n6 -pn3 /dev/md/ddf0 -z 5000
assert_kill /dev/md/l10_2 4
assert_kill /dev/md/l10_3 5
# RAID5 geometries
mdadm -CR l5la -l5 -n3 --layout=ddf-N-restart /dev/md/ddf0 -z 5000
mdadm -CR l5ra -l5 -n3 --layout=ddf-zero-restart /dev/md/ddf0 -z 5000
mdadm -CR l5ls -l5 -n3 --layout=ddf-N-continue /dev/md/ddf0 -z 5000
assert_fail -CR l5rs -l5 -n3 -prs /dev/md/ddf0 -z 5000
# Stop and reassemble
mdadm -Ss
mdadm -A /dev/md/ddf0 $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
mdadm -I /dev/md/ddf0
assert_kill /dev/md/l5la 4
assert_kill /dev/md/l5ls 6
assert_kill /dev/md/l5ra 5
# RAID6 geometries
assert_fail -CR l6la -l6 -n3 -pla /dev/md/ddf0 -z 5000
assert_fail -CR l6rs -l5 -n4 -prs /dev/md/ddf0 -z 5000
mdadm -CR l6la -l6 -n4 --layout=ddf-N-restart /dev/md/ddf0 -z 5000
mdadm -CR l6ra -l6 -n4 --layout=ddf-zero-restart $dev8 $dev9 $dev10 $dev11 -z 5000
mdadm -CR l6ls -l6 -n4 --layout=ddf-N-continue $dev13 $dev8 $dev9 $dev12 -z 5000
mdadm -Ss

View file

@ -0,0 +1,131 @@
# An array is assembled incompletely. Some disks will
# have later metadata than others.
# The array is then reassembled in the "wrong" order -
# older meta data first.
# This FAILS with mdadm 3.3
. tests/env-ddf-template
tmp=$(mktemp /tmp/mdtest-XXXXXX)
rm -f $tmp /var/tmp/mdmon.log
ret=0
mdadm -CR $container -e ddf -n 4 $dev8 $dev9 $dev10 $dev11
ddf_check container 4
mdadm -CR $member1 -n 4 -l 10 $dev8 $dev10 $dev9 $dev11 -z 10000
mdadm -CR $member0 -n 2 -l 1 $dev8 $dev9 -z 10000
mdadm --wait $member0 || true
mdadm --wait $member1 || true
mke2fs -F $member0
mke2fs -F $member1
sha_0a=$(sha1_sum $member0)
sha_1a=$(sha1_sum $member1)
mdadm -Ss
sleep 1
# Add all devices except those for $member0
mdadm -I $dev10
mdadm -I $dev11
# Start runnable members ($member1) and write
mdadm -IRs || true
e2fsck -fy $member1
sha_1b=$(sha1_sum $member1)
mdadm -Ss
sleep 1
# Seq number should be different now
seq8a=$(mdadm -E $dev8 | sed -n 's/^ *Seq : //p')
seq10a=$(mdadm -E $dev10 | sed -n 's/^ *Seq : //p')
if [ $seq8a -ge $seq10a ]; then
ret=1
echo ERROR: sequential number of $dev10 not bigger than $dev8
fi
if [ x$sha_1a = x$sha_1b ]; then
ret=1
echo ERROR: sha1sums equal after write
fi
#[ -f /var/tmp/mdmon.log ] && cat /var/tmp/mdmon.log
# Now reassemble
# Note that we add the previously missing disks first.
# $dev10 should have a higher seq number than $dev8
for d in $dev8 $dev9 $dev10 $dev11; do
mdadm -I $d
done
mdadm -IRs || true
sha_0c=$(sha1_sum $member0)
sha_1c=$(sha1_sum $member1)
mdadm -Ss
sleep 1
seq8c=$(mdadm -E $dev8 | sed -n 's/^ *Seq : //p')
seq10c=$(mdadm -E $dev10 | sed -n 's/^ *Seq : //p')
if [ x$sha_0a != x$sha_0c ]; then
ret=1
echo ERROR: sha1sum of $member0 has changed
fi
if [ x$sha_1b != x$sha_1c ]; then
ret=1
echo ERROR: sha1sum of $member1 has changed
fi
if [ \( $seq10a -ge $seq10c \) -o \( $seq8c -ne $seq10c \) ]; then
ret=1
echo ERROR: sequential numbers are wrong
fi
# Expect consistent state
for d in $dev10 $dev8; do
mdadm -E $d>$tmp
for x in 0 1; do
egrep 'state\['$x'\] : Optimal, Consistent' $tmp || {
ret=1
echo ERROR: $member0 has unexpected state on $d
}
done
if [ x$(egrep -c 'active/Online$' $tmp) != x4 ]; then
ret=1
echo ERROR: unexpected number of online disks on $d
fi
done
# Now try assembly
if mdadm -A $container $dev8 $dev9 $dev10 $dev11; then
mdadm -IR $container
sha_0d=$(sha1_sum $member0)
sha_1d=$(sha1_sum $member1)
mdadm -Ss
sleep 1
seq8d=$(mdadm -E $dev8 | sed -n 's/^ *Seq : //p')
seq10d=$(mdadm -E $dev10 | sed -n 's/^ *Seq : //p')
if [ x$sha_0a != x$sha_0d ]; then
ret=1
echo ERROR: sha1sum of $member0 has changed
fi
if [ x$sha_1b != x$sha_1d ]; then
ret=1
echo ERROR: sha1sum of $member1 has changed
fi
if [ \( $seq10a -ge $seq10d \) -o \( $seq8d -ne $seq10d \) ]; then
ret=1
echo ERROR: sequential numbers are wrong
fi
else
ret=1
echo ERROR: assembly failed
fi
if [ $ret -ne 0 ]; then
mdadm -E $dev10
mdadm -E $dev8
fi
rm -f $tmp /var/tmp/mdmon.log
[ $ret -eq 0 ]

View file

@ -0,0 +1,18 @@
#
# An array is assembled with one device missing.
# The missing device must be marked as failed in the metadata
. tests/env-ddf-template
mdadm -CR $container -e ddf -n 2 $dev8 $dev9
ddf_check container 2
mdadm -CR $member1 -n 2 -l1 $dev8 $dev9
mdadm --wait $member1 || true
mdadm -Ss
mdadm -I $dev8
mdadm -R $container
mkfs $member1
# There must be a missing device recorded
mdadm --examine $dev8 | grep 'Raid Devices.*--' || exit 1

454
tests/11spare-migration Normal file
View file

@ -0,0 +1,454 @@
# Set of tests for autorebuild functionality using mdadm -F
# To be able to test ddf, all loop devices must be of a bigger size, with the
# ones above number 7 bigger again by any amount (this is not changed for now
# as it could affect other tests)
export IMSM_DEVNAME_AS_SERIAL=1
export IMSM_TEST_OROM=1
export IMSM_NO_PLATFORM=1
. tests/utils
set -ex
verbose="yes"
sleeptime=10
# if listfailed=yes then don't exit if test failed due to wrong
# spare-migration and just print a list at the end. Other errors still
# stop the test.
# if listfailed=no then exit on first failure
listfailed="yes"
# start Monitor, set monitorpid
# uses global scan variable
# all parameters are numbers of devices to be monitored; only used when $scan="no"
# eg. monitor 0 1 will start monitoring of containers c0, c1 and subarrays v0, v1
monitor(){
[ -z $monitorpid ] || return
if [ "$scan" == "yes" ]; then
$mdadm -F -d 1 --scan --mail root@localhost -c $config &
monitorpid=$!
return
fi
unset mddevs
while [ -n "$1" ]
do
eval container=\$c$1
eval volumes=\$v$1
mddevs="$mddevs /dev/$container"
if [ "$container" != "$volumes" ]; then
for vol in $volumes; do
mddevs="$mddevs /dev/$vol"
done
fi
shift
done
if [ -n "$mddevs" ]; then
if [ "$verbose" != "yes" ]; then
$mdadm -F -d 1 $mddevs -c $config >&2 &
monitorpid=$!
else
$mdadm -F -t -d 1 $mddevs -c $config &
monitorpid=$!
fi
fi
[ "$verbose" != "yes" ] || echo $mddevs $monitorpid
}
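# Editorial note: the guard at the top makes monitor() a no-op if a monitor
# is already running; $monitorpid is presumably cleared by tidyup (from
# tests/utils) between tests.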
test0()
{
dsc "Test 0: No config file, no spare should be moved"
> $config
setupdevs 0 0 1 $platform
setupdevs 1 3 4 $platform
monitor 0 1
mdadm -a /dev/$c1 $dev2
mdadm --fail /dev/$v0 $dev0
# check that spare loop2 was not moved from container c1 to container c0
chksparemoved $c1 $c0 $dev2 n
tidyup
}
test0a()
{
dsc "Test 0a: No domains in config file, no spare should be moved"
setupdevs 0 0 1 $platform
setupdevs 1 3 4 $platform
createconfig a
monitor 0 1
mdadm -a /dev/$c1 $dev2
mdadm --fail /dev/$v0 $dev0
# check that spare loop2 was not moved from container c1 to container c0
chksparemoved $c1 $c0 $dev2 n
tidyup
}
test1()
{
dsc "Test 1: Common domain, add disk to one container and fail first one in another container, spare should be moved"
setupdevs 0 0 1 $platform
setupdevs 1 3 4 $platform
# create config file with arrays and common domain
createconfig a
createconfig domain-$platform $platform spare 0 1 2 3 4
monitor 0 1
mdadm -a /dev/$c1 $dev2
mdadm --fail /dev/$v0 $dev0
# check that spare loop2 was moved from container c1 to container c0
chksparemoved $c1 $c0 $dev2
tidyup
}
test1a()
{
dsc "Test 1a: Common domain, add disk to one container and fail second one in another container, spare should be moved"
setupdevs 0 0 1 $platform
setupdevs 1 3 4 $platform
createconfig a
createconfig domain-$platform $platform spare 0 1 2 3 4
monitor 0 1
mdadm -a /dev/$c1 $dev2
mdadm --fail /dev/$v0 $dev1
# check that spare loop2 was moved from container c1 to container c0
chksparemoved $c1 $c0 $dev2
tidyup
}
test2()
{
dsc "Test 2: Common domain, fail disk in one container and add one to another container, spare should be moved"
setupdevs 0 0 1 $platform
setupdevs 1 3 4 $platform
createconfig a
createconfig domain-$platform $platform spare 0 1 2 3 4
monitor 0 1
mdadm --fail /dev/$v0 $dev1
mdadm -a /dev/$c1 $dev2
chksparemoved $c1 $c0 $dev2
tidyup
}
test3()
{
dsc "Test 3: Two domains, fail a disk in one domain, add a disk to another domain, the spare should not be moved"
setupdevs 0 0 1 $platform
setupdevs 1 3 4 $platform
# create config file with 2 domains
createconfig a
createconfig domain-$platform"1" $platform spare 0 1 2
createconfig domain-$platform"2" $platform spare 3 4 5
monitor 0 1
mdadm --fail /dev/$v0 $dev1
mdadm -a /dev/$c1 $dev5
chksparemoved $c1 $c0 $dev5 n
tidyup
}
test4()
{
dsc "Test 4: One domain holds one container, fail a disk in domain, and add disk to a container not described by domain, move if metadata allows"
setupdevs 0 0 1 $platform
setupdevs 1 3 4 $platform
createconfig a
createconfig domain-$platform $platform spare 0 1 2
monitor 0 1
mdadm --fail /dev/$v0 $dev1
mdadm -a /dev/$c1 $dev5
unset shouldmove
[ "$platform" == "imsm" ] || shouldmove="n"
chksparemoved $c1 $c0 $dev5 $shouldmove
tidyup
}
test5()
{
dsc "Test 5: Two domains, two containers in each domain"
setupdevs 0 0 1 $platform
setupdevs 1 3 4 $platform
setupdevs 2 5 6 $platform
setupdevs 3 8 10 $platform
# 2 and 9 for spares
createconfig a
createconfig domain-$platform"1" $platform spare 0 1 2 3 4
createconfig domain-$platform"2" $platform spare 5 6 8 9 10
monitor 0 1 2 3
test5a
test5b
test5c
tidyup
}
test5a()
{
dsc "Test 5a: Two containers in each domain, add spare loop2 to domain1 and fail disk in the other domain, the spare should not be moved"
mdadm -a /dev/$c0 $dev2
mdadm --fail /dev/$v2 $dev5
chksparemoved $c0 $c2 $dev2 n
}
test5b()
{
dsc "Test 5b: Fail disk in the same domain but different container, spare loop2 should be moved"
mdadm --fail /dev/$v1 $dev3
chksparemoved $c0 $c1 $dev2
}
test5c()
{
dsc "Test 5c: Add spare loop9 to different container in domain with degraded array, spare should be moved"
mdadm -a /dev/$c3 $dev9
chksparemoved $c3 $c2 $dev9
}
test6()
{
dsc "Test 6: One domain has two containers, fail a disk in one container, there is a spare in other container too small to use for rebuild"
setupdevs 0 0 1 $platform
setupdevs 1 8 9 $platform
# all devices in one domain
createconfig a
createconfig domain-$platform $platform spare 0 1 2 8 9
monitor 0 1
mdadm -a /dev/$c0 $dev2
mdadm --fail /dev/$v1 $dev8
chksparemoved $c0 $c1 $dev2 n
tidyup
}
test7()
{
dsc "Test 7: One domain, add small spare to container, fail disk in array, spare not used, add suitable spare to other container, spare should be moved"
setupdevs 0 0 1 $platform
setupdevs 1 8 9 $platform
createconfig a
createconfig domain-$platform $platform spare 0 1 2 8 9 10
monitor 0 1
mdadm -a /dev/$c1 $dev2
mdadm --fail /dev/$v1 $dev8
mdadm -a /dev/$c0 $dev10
chksparemoved $c0 $c1 $dev10
tidyup
}
test7a()
{
dsc "Test 7a: Small spare in parent, suitable one in other container, $dev2 in $c1 is not in common domain"
setupdevs 0 0 1 $platform
setupdevs 1 8 9 $platform
#all $platform devices in one domain
createconfig a
createconfig domain-$platform"1" $platform spare 0 1 8 9 10
createconfig domain-$platform"2" $platform spare 2
monitor 0 1
mdadm -a /dev/$c1 $dev2
chkspare $c1 $dev2
mdadm --fail /dev/$v1 $dev8
mdadm -a /dev/$c0 $dev10
chksparemoved $c0 $c1 $dev10
tidyup
}
test8()
{
# ddf does not have getinfo_super_disks implemented so skip this test
return
dsc "Test 8: imsm and ddf - spare should not be migrated"
setupdevs 0 10 11 imsm
setupdevs 1 8 9 ddf
createconfig a
createconfig domain0 noplatform spare 8 9 10 11 12
monitor 0 1
mdadm -a /dev/$c1 $dev12
mdadm --fail /dev/$v0 $dev10
chksparemoved $c1 $c0 $dev12 n
tidyup
}
test9()
{
dsc "Test 9: imsm and native 1.2 - one domain, no metadata specified, spare should be moved"
setupdevs 0 10 11 imsm
setupdevs 1 8 9 1.2
createconfig a
createconfig domain0 noplatform spare 8 9 10 11 12
monitor 0 1
mdadm -a /dev/$c1 $dev12
mdadm --fail /dev/$v0 $dev10
chksparemoved $c1 $c0 $dev12
tidyup
}
test9a()
{
dsc "Test 9a: imsm and native 1.2 - spare in global domain, should be moved"
setupdevs 0 10 11 imsm
setupdevs 1 8 9 1.2
createconfig a
createconfig domain-global noplatform spare 8 9 10 11 12
createconfig domain-1.2 1.2 spare 8 9
createconfig domain-imsm imsm spare 10 11
monitor 0 1
mdadm -a /dev/$c1 $dev12
mdadm --fail /dev/$v0 $dev10
chksparemoved $c1 $c0 $dev12
tidyup
}
test10()
{
dsc "Test 10: Two arrays on the same devices in container"
setupdevs 0 0 1 $platform 10000
setupdevs 1 3 4 $platform
createconfig a
createconfig domain-$platform $platform spare 0 1 2 3 4 5
monitor 0 1
mdadm -a /dev/$c1 $dev2
mdadm --fail /dev/md/sub0_ $dev0
chksparemoved $c1 $c0 $dev2
if [ $failed -eq 0 ]; then
# now fail the spare and see if we get another one
mdadm --fail /dev/md/sub0_ $dev2
mdadm -a /dev/$c1 $dev5
chksparemoved $c1 $c0 $dev5
fi
tidyup
}
test11()
{
dsc "Test 11: Failed spare from other container should not be used"
setupdevs 0 0 1 $platform
setupdevs 1 3 4 $platform
createconfig a
createconfig domain-$platform $platform spare 0 1 2 3 4
monitor 0 1
mdadm -a /dev/$c1 $dev2
mdadm --fail /dev/$v1 $dev3
#wait until recovery finishes so no degraded array in c1
check wait
mdadm --fail /dev/$v0 $dev0
chksparemoved $c1 $c0 $dev3 n
tidyup
}
test12()
{
dsc "Test 12: Only one spare should be taken for rebuild, second not needed"
setupdevs 0 0 1 $platform
setupdevs 1 3 4 $platform
createconfig a
createconfig domain-$platform $platform spare 0 1 2 3 4 5
monitor 0 1
mdadm -a /dev/$c1 $dev2
mdadm -a /dev/$c1 $dev5
mdadm --fail /dev/$v0 $dev0
sleep $sleeptime
chkarray $dev2 n
sc1=$c
chkarray $dev5 n
sc2=$c
[ "$sc1" != "$sc2" ] || err "both spares in the same container $sc1"
tidyup
}
test13()
{
dsc "Test 13: Common domain, two containers, fail a disk in container, action is below spare, the spare should be moved regadless of action"
setupdevs 0 0 1 $platform
setupdevs 1 4 5 $platform
# same domain but different action on 4 5 6
createconfig a
createconfig domain-$platform $platform spare 0 1
createconfig domain-$platform $platform include 4 5 6
monitor 0 1
mdadm -a /dev/$c1 $dev6
mdadm --fail /dev/$v0 $dev0
chksparemoved $c1 $c0 $dev6
tidyup
}
test14()
{
dsc "Test 14: One domain, small array on big disks, check if small spare is accepted"
setupdevs 0 8 9 $platform 10000 1
setupdevs 1 0 1 $platform
createconfig a
createconfig domain-$platform $platform spare 0 1 2 8 9
monitor 0 1
mdadm -a /dev/$c1 $dev2
mdadm --fail /dev/$v0 $dev9
chksparemoved $c1 $c0 $dev2
tidyup
}
test15()
{
dsc "Test 15: spare in global domain for $platform metadata, should be moved"
# this is like 9a but only one metadata used
setupdevs 0 10 11 $platform
setupdevs 1 8 9 $platform
createconfig a
createconfig domain-global $platform spare 8 9 10 11 12
createconfig domain-1 $platform spare 8 9
createconfig domain-2 $platform spare 10 11
monitor 0 1
mdadm -a /dev/$c1 $dev12
mdadm --fail /dev/$v0 $dev10
chksparemoved $c1 $c0 $dev12
tidyup
}
try()
{
test0
test0a
test1
test1a
test2
test3
test4
test5
test6
if [ "$platform" != "1.2" ]; then
# this is because we can't have a small spare added to native array
test7
test7a
fi
test8
test9
test9a
if [ "$platform" != "1.2" ]; then
# we can't create two subarrays on the same devices for native (without
# partitions)
test10
fi
test11
test12
test13
test14
test15
}
try_failed()
{
platform="1.2"
scan="no"
test5
test9
test13
scan="yes"
test9
}
#try_failed
for scan in no yes; do
for platform in 1.2 imsm; do
try
done
done
[ $listfailed == "no" ] || [ -z $flist ] || echo -e "\n FAILED TESTS: $flist"
#cat $targetdir/log
rm -f /dev/disk/by-path/loop*

View file

@ -0,0 +1,20 @@
. tests/env-imsm-template
# RAID 0 volume, 2 disks grow to RAID 0 volume, 3 disks
# POSITIVE test
num_disks=2
device_list="$dev0 $dev1"
spare_list="$dev2"
# Before: RAID 0 volume, 2 disks, 64k chunk size
vol0_level=0
vol0_comp_size=$((5 * 1024))
vol0_chunk=64
vol0_num_comps=$num_disks
vol0_offset=0
# After: RAID 0 volume, 3 disks, 64k chunk size
vol0_new_num_comps=$((num_disks + 1))
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,20 @@
. tests/env-imsm-template
# RAID 0 volume, 2 disks grow to RAID 0 volume, 4 disks
# POSITIVE test
num_disks=2
device_list="$dev0 $dev1"
spare_list="$dev2 $dev3"
# Before: RAID 0 volume, 2 disks, 64k chunk size
vol0_level=0
vol0_comp_size=$((5 * 1024))
vol0_chunk=64
vol0_num_comps=$num_disks
vol0_offset=0
# After: RAID 0 volume, 4 disks, 64k chunk size
vol0_new_num_comps=$((num_disks + 2))
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,20 @@
. tests/env-imsm-template
# RAID 0 volume, 2 disks grow to RAID 0 volume, 5 disks
# POSITIVE test
num_disks=2
device_list="$dev0 $dev1"
spare_list="$dev2 $dev3 $dev4"
# Before: RAID 0 volume, 2 disks, 64k chunk size
vol0_level=0
vol0_comp_size=$((5 * 1024))
vol0_chunk=64
vol0_num_comps=$num_disks
vol0_offset=0
# After: RAID 0 volume, 5 disks, 64k chunk size
vol0_new_num_comps=$((num_disks + 3))
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,20 @@
. tests/env-imsm-template
# RAID 0 volume, 3 disks grow to RAID 0 volume, 4 disks
# POSITIVE test
num_disks=3
device_list="$dev0 $dev1 $dev2"
spare_list="$dev3"
# Before: RAID 0 volume, 3 disks, 64k chunk size
vol0_level=0
vol0_comp_size=$((5 * 1024))
vol0_chunk=64
vol0_num_comps=$num_disks
vol0_offset=0
# After: RAID 0 volume, 4 disks, 64k chunk size
vol0_new_num_comps=$((num_disks + 1))
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,20 @@
. tests/env-imsm-template
# RAID 5 volume, 3 disks grow to RAID 5 volume, 4 disks
# POSITIVE test
num_disks=3
device_list="$dev0 $dev1 $dev2"
spare_list="$dev3"
# Before: RAID 5 volume, 3 disks, 64k chunk size
vol0_level=5
vol0_comp_size=$((5 * 1024))
vol0_chunk=64
vol0_num_comps=$((num_disks - 1))
vol0_offset=0
# After: RAID 5 volume, 4 disks, 64k chunk size
vol0_new_num_comps=$num_disks
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,20 @@
. tests/env-imsm-template
# RAID 5 volume, 3 disks grow to RAID 5 volume, 5 disks
# POSITIVE test
num_disks=3
device_list="$dev0 $dev1 $dev2"
spare_list="$dev3 $dev4"
# Before: RAID 5 volume, 3 disks, 64k chunk size
vol0_level=5
vol0_comp_size=$((5 * 1024))
vol0_chunk=64
vol0_num_comps=$((num_disks - 1))
vol0_offset=0
# After: RAID 5 volume, 5 disks, 64k chunk size
vol0_new_num_comps=$((num_disks + 1))
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,29 @@
. tests/env-imsm-template
# Grow the container (arrays inside) from 2 disks to 4 disks
# POSITIVE test
num_disks=2
device_list="$dev0 $dev1"
spare_list="$dev2 $dev3"
# Before: RAID 0 volume in slot #0, 2 disks, 128k chunk size
# RAID 0 volume in slot #1, 2 disks, 64k chunk size
vol0_level=0
vol0_comp_size=$((5 * 1024))
vol0_chunk=128
vol0_num_comps=$num_disks
vol0_offset=0
vol1_level=0
vol1_comp_size=$((5 * 1024))
vol1_chunk=64
vol1_num_comps=$num_disks
vol1_offset=$((vol0_comp_size + 4096))
# After: RAID 0 volume in slot #0, 4 disks, 128k chunk size
# RAID 0 volume in slot #1, 4 disks, 64k chunk size
vol0_new_num_comps=$((num_disks + 2))
vol1_new_num_comps=$vol0_new_num_comps
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,29 @@
. tests/env-imsm-template
# Grow both members from 2 disks to 5 disks
# POSITIVE test
num_disks=2
device_list="$dev0 $dev1"
spare_list="$dev2 $dev3 $dev4"
# Before: RAID 0 volume in slot #0, 2 disks, 64k chunk size
# RAID 0 volume in slot #1, 2 disks, 256k chunk size
vol0_level=0
vol0_comp_size=$((4 * 1024))
vol0_chunk=64
vol0_num_comps=$num_disks
vol0_offset=0
vol1_level=0
vol1_comp_size=$((6 * 1024))
vol1_chunk=256
vol1_num_comps=$num_disks
vol1_offset=$((vol0_comp_size + 4096))
# After: RAID 0 volume in slot #0, 5 disks, 64k chunk size
# RAID 0 volume in slot #1, 5 disks, 256k chunk size
vol0_new_num_comps=$((num_disks + 3))
vol1_new_num_comps=$vol0_new_num_comps
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,29 @@
. tests/env-imsm-template
# Grow a container (arrays inside) from 3 disks to 4 disks
# POSITIVE test
num_disks=3
device_list="$dev0 $dev1 $dev2"
spare_list="$dev3"
# Before: RAID 0 volume in slot #0, 3 disks, 128k chunk size
# RAID 0 volume in slot #1, 3 disks, 128k chunk size
vol0_level=0
vol0_comp_size=$((5 * 1024))
vol0_chunk=128
vol0_num_comps=$num_disks
vol0_offset=0
vol1_level=0
vol1_comp_size=$((5 * 1024))
vol1_chunk=128
vol1_num_comps=$num_disks
vol1_offset=$((vol0_comp_size + 4096))
# After: RAID0 volume in slot #0, 4 disks, 128k chunk size
# RAID0 volume in slot #1, 4 disks, 128k chunk size
vol0_new_num_comps=$((num_disks + 1))
vol1_new_num_comps=$vol0_new_num_comps
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,29 @@
. tests/env-imsm-template
# Grow the container (arrays inside) from 3 disks to 4 disks
# POSITIVE test
num_disks=3
device_list="$dev0 $dev1 $dev2"
spare_list="$dev3"
# Before: RAID 0 volume in slot #0, 3 disks, 64k chunk size
# RAID 5 volume in slot #1, 3 disks, 128k chunk size
vol0_level=0
vol0_comp_size=$((5 * 1024))
vol0_chunk=64
vol0_num_comps=$num_disks
vol0_offset=0
vol1_level=5
vol1_comp_size=$((5 * 1024))
vol1_chunk=128
vol1_num_comps=$((num_disks - 1))
vol1_offset=$((vol0_comp_size + 4096))
# After: RAID 0 volume in slot #0, 4 disks, 64k chunk size
# RAID 5 volume in slot #1, 4 disks, 128k chunk size
vol1_new_num_comps=$num_disks
vol0_new_num_comps=$((num_disks + 1))
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,29 @@
. tests/env-imsm-template
# Grow the container (arrays inside) from 3 disks to 5 disks
# POSITIVE test
num_disks=3
device_list="$dev0 $dev1 $dev2"
spare_list="$dev3 $dev4"
# Before: RAID 0 volume in slot #0, 3 disks, 128k chunk size
# RAID 5 volume in slot #1, 3 disks, 128k chunk size
vol0_level=0
vol0_comp_size=$((5 * 1024))
vol0_chunk=128
vol0_num_comps=$num_disks
vol0_offset=0
vol1_level=5
vol1_comp_size=$((5 * 1024))
vol1_chunk=128
vol1_num_comps=$((num_disks - 1))
vol1_offset=$((vol0_comp_size + 4096))
# After: RAID 0 volume in slot #0, 5 disks, 128k chunk size
# RAID 5 volume in slot #1, 5 disks, 128k chunk size
vol0_new_num_comps=$((num_disks + 2))
vol1_new_num_comps=$((num_disks + 1))
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,29 @@
. tests/env-imsm-template
# Grow the container (arrays inside) from 3 disks to 4 disks
# POSITIVE test
num_disks=3
device_list="$dev0 $dev1 $dev2"
spare_list="$dev3"
# Before: RAID 5 volume in slot #0, 3 disks, 64k chunk size
# RAID 0 volume in slot #1, 3 disks, 64k chunk size
vol0_level=5
vol0_comp_size=$((5 * 1024))
vol0_chunk=64
vol0_num_comps=$((num_disks - 1))
vol0_offset=0
vol1_level=0
vol1_comp_size=$((5 * 1024))
vol1_chunk=64
vol1_offset=$((vol0_comp_size + 4096))
vol1_num_comps=$num_disks
# After: RAID 5 volume in slot #0, 4 disks, 64k chunk size
# RAID 0 volume in slot #1, 4 disks, 64k chunk size
vol0_new_num_comps=$num_disks
vol1_new_num_comps=$((num_disks + 1))
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,29 @@
. tests/env-imsm-template
# Grow the container (arrays inside) from 3 disks to 5 disks
# POSITIVE test
num_disks=3
device_list="$dev0 $dev1 $dev2"
spare_list="$dev3 $dev4"
# Before: RAID 5 volume in slot #0, 3 disks, 64k chunk size
# RAID 0 volume in slot #1, 3 disks, 64k chunk size
vol0_level=5
vol0_comp_size=$((5 * 1024))
vol0_chunk=64
vol0_num_comps=$((num_disks - 1))
vol0_offset=0
vol1_level=0
vol1_comp_size=$((5 * 1024))
vol1_chunk=64
vol1_offset=$((vol0_comp_size + 4096))
vol1_num_comps=$num_disks
# After: RAID 5 volume in slot #0, 5 disks, 64k chunk size
# RAID 0 volume in slot #1, 5 disks, 64k chunk size
vol0_new_num_comps=$((num_disks + 1))
vol1_new_num_comps=$((num_disks + 2))
. tests/imsm-grow-template 0 0

View file

@ -0,0 +1,29 @@
. tests/env-imsm-template
# RAID 0 and RAID 5 volumes (3 disks) migrate to RAID 5 and RAID 5 volumes (4 disks)
# NEGATIVE test - migration is not allowed if there is more than one array in a container
num_disks=3
device_list="$dev0 $dev1 $dev2"
spare_list="$dev3"
# Before: RAID 0 volume, 3 disks, 64k chunk size, as member #0
vol0_level=0
vol0_comp_size=$((5 * 1024))
vol0_chunk=64
vol0_num_comps=$num_disks
vol0_offset=0
# Extra: RAID 5 volume, 3 disks, 64k chunk size, as member #1
vol1_level=5
vol1_comp_size=$((5 * 1024))
vol1_chunk=64
vol1_num_comps=$((num_disks - 1))
vol1_offset=$((vol0_comp_size + 4096))
# After: RAID 5 volume, 4 disks, 64k chunk size (only member #0)
vol0_new_level=5
vol0_new_num_comps=$num_disks
vol0_new_chunk=64
. tests/imsm-grow-template 1 1

View file

@ -0,0 +1,21 @@
. tests/env-imsm-template
# RAID 0 volume (3 disks, no spares) migrate to RAID 5 volume (3 disks)
# NEGATIVE test
num_disks=3
device_list="$dev0 $dev1 $dev2"
# Before: RAID 0 volume, 3 disks, 64k chunk size
vol0_level=0
vol0_comp_size=$((5 * 1024))
vol0_chunk=64
vol0_num_comps=$num_disks
vol0_offset=0
# After: RAID 5, 3 disks, 64k chunk size
vol0_new_level=5
vol0_new_num_comps=$((num_disks - 1))
vol0_new_chunk=64
. tests/imsm-grow-template 1

View file

@ -0,0 +1,30 @@
. tests/env-imsm-template
# Two RAID 0 volumes (2 disks) migrate to RAID 10 volume (4 disks)
# NEGATIVE test
num_disks=2
device_list="$dev0 $dev1"
# Before: RAID 0 volume in slot #0, 2 disks, 64k chunk size
# RAID 0 volume in slot #1, 2 disks, 64k chunk size
vol0_level=0
vol0_comp_size=$((5 * 1024))
vol0_chunk=64
vol0_num_comps=$num_disks
vol0_offset=0
# Before: RAID 0 volume in slot #1, 2 disks, 64k chunk size
vol1_level=0
vol1_comp_size=$((5 * 1024))
vol1_chunk=64
vol1_num_comps=$num_disks
vol1_offset=$((vol0_comp_size + 4096))
# After: RAID 10, 4 disks, 64k chunk size
vol0_new_level=10
vol0_new_num_comps=$((num_disks - 1))
vol0_new_chunk=64
. tests/imsm-grow-template 1 1

Some files were not shown because too many files have changed in this diff