Adding upstream version 4.3+20241108.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-14 06:35:11 +01:00
parent 3ea4f9a80b
commit 07a108cefd
Signed by: daniel (GPG key ID: FBB4F0E80A80222F)
64 changed files with 2015 additions and 1768 deletions

View file

@@ -13,6 +13,10 @@ check raid5
testdev $md0 3 19456 512
mdadm -G $md0 -l0
check wait; sleep 1
while ps auxf | grep "mdadm -G" | grep -v grep
do
sleep 1
done
check raid0
testdev $md0 3 19456 512
mdadm -G $md0 -l5 --add $dev3 $dev4
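
For reference, the process-wait idiom used in this hunk can be wrapped in a small reusable helper. This is a sketch only, not part of the change: the name wait_for_cmd is invented for illustration, and it assumes ps and grep are available as they are in these tests.

  wait_for_cmd() {
      # Poll until no process matching the given pattern is left running.
      pattern="$1"
      while ps auxf | grep "$pattern" | grep -v grep > /dev/null
      do
          sleep 1
      done
  }
  # Example: wait_for_cmd "mdadm -G"   # block until the backgrounded reshape command has exited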

View file

@@ -10,7 +10,6 @@ export MDADM_GROW_VERIFY=1
dotest() {
sleep 2
check wait
testdev $md0 $1 19968 64 nd
blockdev --flushbufs $md0
cmp -s -n $[textK*1024] $md0 /tmp/RandFile || { echo cmp failed; exit 2; }
# write something new - shift chars 4 space
@@ -24,7 +23,7 @@ checkgeo() {
# level raid_disks chunk_size layout
dev=$1
shift
sleep 0.5
sleep 15
check wait
sleep 1
for attr in level raid_disks chunk_size layout
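
The attribute names iterated in the checkgeo() hunk above map one-to-one onto files in the array's md sysfs directory. A minimal standalone sketch of reading them back, assuming the array is /dev/md0 (so the path is /sys/block/md0/md):

  # Sketch only: print the current geometry of /dev/md0 from sysfs.
  for attr in level raid_disks chunk_size layout
  do
      echo "$attr = $(cat /sys/block/md0/md/$attr)"
  done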
@@ -43,22 +42,25 @@ checkgeo() {
bu=/tmp/md-test-backup
rm -f $bu
mdadm -CR $md0 -l1 -n2 -x1 $dev0 $dev1 $dev2 -z 19968
testdev $md0 1 $mdsize1a 64
mdadm -CR $md0 -l1 -n2 -x1 $dev0 $dev1 $dev2
[ -b $md0 ] || die "$1 isn't a block device."
dd if=/tmp/RandFile of=$md0
dotest 1
mdadm --grow $md0 -l5 -n3 --chunk 64
mdadm --grow $md0 -l5 -n3
checkgeo md0 raid5 3
dotest 2
mdadm $md0 --add $dev3 $dev4
mdadm --grow $md0 -n4 --chunk 32
checkgeo md0 raid5 4 $[32*1024]
dotest 3
mdadm -G $md0 -l6 --backup-file $bu
checkgeo md0 raid6 5 $[32*1024]
dotest 3
mdadm -G /dev/md0 --array-size 39936
mdadm -G /dev/md0 --array-size 37888
mdadm -G $md0 -n4 --backup-file $bu
checkgeo md0 raid6 4 $[32*1024]
dotest 2
@@ -67,14 +69,11 @@ mdadm -G $md0 -l5 --backup-file $bu
checkgeo md0 raid5 3 $[32*1024]
dotest 2
mdadm -G /dev/md0 --array-size 19968
mdadm -G /dev/md0 --array-size 18944
mdadm -G $md0 -n2 --backup-file $bu
checkgeo md0 raid5 2 $[32*1024]
dotest 1
mdadm -G --level=1 $md0
dotest 1
# now repeat that last few steps only with a degraded array.
mdadm -S $md0
mdadm -CR $md0 -l6 -n5 $dev0 $dev1 $dev2 $dev3 $dev4
@@ -83,7 +82,7 @@ dotest 3
mdadm $md0 --fail $dev0
mdadm -G /dev/md0 --array-size 37888
mdadm -G /dev/md0 --array-size 35840
mdadm -G $md0 -n4 --backup-file $bu
dotest 2
checkgeo md0 raid6 4 $[512*1024]
@@ -103,12 +102,10 @@ dotest 2
mdadm -G $md0 -l5 --backup-file $bu
dotest 2
mdadm -G /dev/md0 --array-size 18944
mdadm -G /dev/md0 --array-size 17920
mdadm -G $md0 -n2 --backup-file $bu
dotest 1
checkgeo md0 raid5 2 $[512*1024]
mdadm $md0 --fail $dev2
mdadm -G --level=1 $md0
dotest 1
checkgeo md0 raid1 2
mdadm -S $md0

View file

@@ -1,9 +0,0 @@
always fails
Fails with errors:
mdadm: /dev/loop0 is smaller than given size. 18976K < 19968K + metadata
mdadm: /dev/loop1 is smaller than given size. 18976K < 19968K + metadata
mdadm: /dev/loop2 is smaller than given size. 18976K < 19968K + metadata
ERROR: /dev/md0 isn't a block device.

View file

@@ -1,45 +0,0 @@
always fails
This patch, recently added to md-next causes the test to always fail:
7e6ba434cc60 ("md: don't unregister sync_thread with reconfig_mutex
held")
The new error is simply:
ERROR: no reshape happening
Before the patch, the error seen is below.
--
fails infrequently
Fails roughly 1 in 4 runs with errors:
mdadm: Merging with already-assembled /dev/md/0
mdadm: cannot re-read metadata from /dev/loop6 - aborting
ERROR: no reshape happening
Also have seen a random deadlock:
INFO: task mdadm:109702 blocked for more than 30 seconds.
Not tainted 5.18.0-rc3-eid-vmlocalyes-dbg-00095-g3c2b5427979d #2040
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:mdadm state:D stack: 0 pid:109702 ppid: 1 flags:0x00004000
Call Trace:
<TASK>
__schedule+0x67e/0x13b0
schedule+0x82/0x110
mddev_suspend+0x2e1/0x330
suspend_lo_store+0xbd/0x140
md_attr_store+0xcb/0x130
sysfs_kf_write+0x89/0xb0
kernfs_fop_write_iter+0x202/0x2c0
new_sync_write+0x222/0x330
vfs_write+0x3bc/0x4d0
ksys_write+0xd9/0x180
__x64_sys_write+0x43/0x50
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae

View file

@@ -4,6 +4,7 @@
# kernel md code to move data into and out of variously
# shaped md arrays.
set -x
dir="."
layouts=(la ra ls rs)
for level in 5 6
do

View file

@@ -1,12 +0,0 @@
always fails
Test seems to run 'test_stripe' at $dir directory, but $dir is never
set. If $dir is adjusted to $PWD, the test still fails with:
mdadm: /dev/loop2 is not suitable for this array.
mdadm: create aborted
++ return 1
++ cmp -s -n 8192 /dev/md0 /tmp/RandFile
++ echo cmp failed
cmp failed
++ exit 2

View file

@@ -1,6 +0,0 @@
fails infrequently
Fails roughly 1 in 10 runs with errors:
mdadm: /dev/loop2 is still in use, cannot remove.
/dev/loop2 removal from /dev/md/container should have succeeded

View file

@@ -7,12 +7,12 @@ vol0_num_comps=1
vol0_comp_size=$((10 * 1024))
# Create container
mdadm --create --run $container --auto=md --metadata=imsm --force --raid-disks=$vol0_num_comps $dev0
mdadm --create --run $container --metadata=imsm --force --raid-disks=$vol0_num_comps $dev0
check wait
imsm_check container $vol0_num_comps
# Create RAID 0 volume
mdadm --create --run $member0 --auto=md --level=0 --size=$vol0_comp_size --chunk=64 --force --raid-disks=$vol0_num_comps $dev0
mdadm --create --run $member0 --level=0 --size=$vol0_comp_size --chunk=64 --force --raid-disks=$vol0_num_comps $dev0
check wait
# Test the member

View file

@@ -362,6 +362,10 @@ check() {
do
sleep 0.5
done
while ps auxf | grep "mdadm --grow --continue" | grep -v grep
do
sleep 1
done
echo $min > /proc/sys/dev/raid/speed_limit_min
echo $max > /proc/sys/dev/raid/speed_limit_max
;;
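
The two echo lines above put back speed limits that check() saved earlier. As a standalone illustration, temporarily lifting and then restoring the md resync/reshape throttle looks like this (a sketch only; the numeric values are arbitrary examples, the /proc paths are the standard md tunables used above):

  min=$(cat /proc/sys/dev/raid/speed_limit_min)
  max=$(cat /proc/sys/dev/raid/speed_limit_max)
  echo 200000  > /proc/sys/dev/raid/speed_limit_min    # example value: let the reshape run fast
  echo 2000000 > /proc/sys/dev/raid/speed_limit_max    # example value
  # ... wait for the grow/reshape to finish ...
  echo $min > /proc/sys/dev/raid/speed_limit_min
  echo $max > /proc/sys/dev/raid/speed_limit_max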

View file

@@ -37,24 +37,24 @@ function grow_member() {
}
# Create container
mdadm --create --run $container --auto=md --metadata=imsm --raid-disks=$num_disks $device_list
mdadm --create --run $container --metadata=imsm --raid-disks=$num_disks $device_list
check wait
imsm_check container $num_disks
# Create first volume inside the container
if [[ ! -z $vol0_chunk ]]; then
mdadm --create --run $member0 --auto=md --level=$vol0_level --size=$vol0_comp_size --chunk=$vol0_chunk --raid-disks=$num_disks $device_list
mdadm --create --run $member0 --level=$vol0_level --size=$vol0_comp_size --chunk=$vol0_chunk --raid-disks=$num_disks $device_list
else
mdadm --create --run $member0 --auto=md --level=$vol0_level --size=$vol0_comp_size --raid-disks=$num_disks $device_list
mdadm --create --run $member0 --level=$vol0_level --size=$vol0_comp_size --raid-disks=$num_disks $device_list
fi
check wait
# Create second volume inside the container (if defined)
if [ ! -z $vol1_level ]; then
if [ ! -z $vol1_chunk ]; then
mdadm --create --run $member1 --auto=md --level=$vol1_level --size=$vol1_comp_size --chunk=$vol1_chunk --raid-disks=$num_disks $device_list
mdadm --create --run $member1 --level=$vol1_level --size=$vol1_comp_size --chunk=$vol1_chunk --raid-disks=$num_disks $device_list
else
mdadm --create --run $member1 --auto=md --level=$vol1_level --size=$vol1_comp_size --raid-disks=$num_disks $device_list
mdadm --create --run $member1 --level=$vol1_level --size=$vol1_comp_size --raid-disks=$num_disks $device_list
fi
check wait
fi