replay-single.sh
test_25() {
    multiop_bg_pause $DIR/$tfile O_tSc || return 3
    pid=$!
    rm -f $DIR/$tfile
    replay_barrier mds
    fail mds
    kill -USR1 $pid
    wait $pid || return 1
    [ -e $DIR/$tfile ] && return 2
    return 0
}
run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"

test_26() {
    replay_barrier mds
    multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
    pid1=$!
    multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
    pid2=$!
    rm -f $DIR/$tfile-1
    rm -f $DIR/$tfile-2
    kill -USR1 $pid2
    wait $pid2 || return 1
    fail mds
    kill -USR1 $pid1
    wait $pid1 || return 2
    [ -e $DIR/$tfile-1 ] && return 3
    [ -e $DIR/$tfile-2 ] && return 4
    return 0
}
run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"

test_27() {
    replay_barrier mds
    multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
    pid1=$!
    multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
    pid2=$!
    rm -f $DIR/$tfile-1
    rm -f $DIR/$tfile-2
    fail mds
    kill -USR1 $pid1
    wait $pid1 || return 1
    kill -USR1 $pid2
    wait $pid2 || return 2
    [ -e $DIR/$tfile-1 ] && return 3
    [ -e $DIR/$tfile-2 ] && return 4
    return 0
}
run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"

test_28() {
    multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
    pid1=$!
    multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
    pid2=$!
    replay_barrier mds
    rm -f $DIR/$tfile-1
    rm -f $DIR/$tfile-2
    kill -USR1 $pid2
    wait $pid2 || return 1
    fail mds
    kill -USR1 $pid1
    wait $pid1 || return 2
    [ -e $DIR/$tfile-1 ] && return 3
    [ -e $DIR/$tfile-2 ] && return 4
    return 0
}
run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"

test_29() {
    multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
    pid1=$!
    multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
    pid2=$!
    replay_barrier mds
    rm -f $DIR/$tfile-1
    rm -f $DIR/$tfile-2
    fail mds
    kill -USR1 $pid1
    wait $pid1 || return 1
    kill -USR1 $pid2
    wait $pid2 || return 2
    [ -e $DIR/$tfile-1 ] && return 3
    [ -e $DIR/$tfile-2 ] && return 4
    return 0
}
run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"

test_30() {
    multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
    pid1=$!
    multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
    pid2=$!
    rm -f $DIR/$tfile-1
    rm -f $DIR/$tfile-2
    replay_barrier mds
    fail mds
    kill -USR1 $pid1
    wait $pid1 || return 1
    kill -USR1 $pid2
    wait $pid2 || return 2
    [ -e $DIR/$tfile-1 ] && return 3
    [ -e $DIR/$tfile-2 ] && return 4
    return 0
}
run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"

test_31() {
    multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
    pid1=$!
    multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
    pid2=$!
    rm -f $DIR/$tfile-1
    replay_barrier mds
    rm -f $DIR/$tfile-2
    fail mds
    kill -USR1 $pid1
    wait $pid1 || return 1
    kill -USR1 $pid2
    wait $pid2 || return 2
    [ -e $DIR/$tfile-1 ] && return 3
    [ -e $DIR/$tfile-2 ] && return 4
    return 0
}
run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_cleanup_orphans)"

# tests for bug 2104; completion without crashing is success. The close is
# stale, but we always return 0 for close, so the app never sees it.
test_32() {
    multiop_bg_pause $DIR/$tfile O_c || return 2
    pid1=$!
    multiop_bg_pause $DIR/$tfile O_c || return 3
    pid2=$!
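    # Added commentary: mds_evict_client is the test-framework helper used
    # elsewhere in this suite to force the MDS to evict this client. The
    # first df below may fail while the import is still evicted, so it is
    # retried once; the closes then run against the evicted handles.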
    mds_evict_client
    df $MOUNT || sleep 1 && df $MOUNT || return 1
    kill -USR1 $pid1
    kill -USR1 $pid2
    sleep 1
    return 0
}
run_test 32 "close() notices client eviction; close() after client eviction"

# Abort recovery before client completes replay
test_33() {
    replay_barrier mds
    createmany -o $DIR/$tfile-%d 100
    fail_abort mds
    # this file should be gone, because the replay was aborted
    $CHECKSTAT -t file $DIR/$tfile-* && return 3
    unlinkmany $DIR/$tfile-%d 0 100
    return 0
}
run_test 33 "abort recovery before client does replay"

test_34() {
    multiop_bg_pause $DIR/$tfile O_c || return 2
    pid=$!
    rm -f $DIR/$tfile
    replay_barrier mds
    fail_abort mds
    kill -USR1 $pid
    [ -e $DIR/$tfile ] && return 1
    sync
    return 0
}
run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"

# bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
test_35() {
    touch $DIR/$tfile
#define OBD_FAIL_MDS_REINT_NET_REP 0x119
    do_facet mds "sysctl -w lustre.fail_loc=0x80000119"
    rm -f $DIR/$tfile &
    sleep 1
    sync
    sleep 1 # give a chance to remove from MDS
    fail_abort mds
    $CHECKSTAT -t file $DIR/$tfile && return 1 || true
}
run_test 35 "test recovery from llog for unlink op"

# b=2432 resent cancel after replay uses wrong cookie,
# so don't resend cancels
test_36() {
    replay_barrier mds
    touch $DIR/$tfile
    checkstat $DIR/$tfile
    facet_failover mds
    cancel_lru_locks mdc
    if dmesg | grep "unknown lock cookie"; then
        echo "cancel after replay failed"
        return 1
    fi
}
run_test 36 "don't resend cancel"

# b=2368
# directory orphans can't be unlinked from PENDING directory
test_37() {
    rmdir $DIR/$tfile 2>/dev/null
    multiop_bg_pause $DIR/$tfile dD_c || return 2
    pid=$!
    rmdir $DIR/$tfile

    replay_barrier mds
    # clear the dmesg buffer so we only see errors from this recovery
    dmesg -c >/dev/null
    fail_abort mds
    kill -USR1 $pid
    dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
    sync
    return 0
}
run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"

test_38() {
    createmany -o $DIR/$tfile-%d 800
    unlinkmany $DIR/$tfile-%d 0 400
    replay_barrier mds
    fail mds
    unlinkmany $DIR/$tfile-%d 400 400
    sleep 2
    $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
}
run_test 38 "test recovery from unlink llog (test llog_gen_rec)"

test_39() { # bug 4176
    createmany -o $DIR/$tfile-%d 800
    replay_barrier mds
    unlinkmany $DIR/$tfile-%d 0 400
    fail mds
    unlinkmany $DIR/$tfile-%d 400 400
    sleep 2
    ls -1f $DIR/$tfile-*
    $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
}
run_test 39 "test recovery from unlink llog (test llog_gen_rec)"

count_ost_writes() {
    awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }' $LPROC/osc/*/stats
}

#b=2477,2532
test_40() {
    $LCTL mark multiop $MOUNT/$tfile OS_c
    multiop $MOUNT/$tfile OS_c &
    PID=$!
    writeme -s $MOUNT/${tfile}-2 &
    WRITE_PID=$!
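    # writeme keeps dirty data flowing to the OSTs in the background;
    # count_ost_writes (above) sums the ost_write counters from every
    # $LPROC/osc/*/stats file, so comparing two samples taken across the
    # MDS failover shows whether OST I/O kept moving during recovery.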
    sleep 1
    facet_failover mds
#define OBD_FAIL_MDS_CONNECT_NET 0x117
    do_facet mds "sysctl -w lustre.fail_loc=0x80000117"
    kill -USR1 $PID
    stat1=`count_ost_writes`
    sleep $TIMEOUT
    stat2=`count_ost_writes`
    echo "$stat1, $stat2"
    if [ $stat1 -lt $stat2 ]; then
        echo "writes continuing during recovery"
        RC=0
    else
        echo "writes not continuing during recovery, bug 2477"
        RC=4
    fi

    echo "waiting for writeme $WRITE_PID"
    kill $WRITE_PID
    wait $WRITE_PID

    echo "waiting for multiop $PID"
    wait $PID || return 2
    do_facet client munlink $MOUNT/$tfile || return 3
    do_facet client munlink $MOUNT/${tfile}-2 || return 3
    return $RC
}
run_test 40 "cause recovery in ptlrpc, ensure IO continues"

#b=2814
# make sure that a read to one osc doesn't try to double-unlock its page just
# because another osc is invalid. trigger_group_io used to mistakenly return
# an error if any oscs were invalid even after having successfully put rpcs
# on valid oscs. This was fatal if the caller was ll_readpage who unlocked
# the page, guaranteeing that the unlock from the RPC completion would
# assert on trying to unlock the unlocked page.
test_41() {
    [ $OSTCOUNT -lt 2 ] && \
        skip "skipping test 41: we don't have a second OST to test with" && \
        return

    local f=$MOUNT/$tfile
    # make sure the start of the file is ost1
    lfs setstripe $f -s $((128 * 1024)) -i 0
    do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
    cancel_lru_locks osc
    # fail ost2 and read from ost1
    local osc2dev=`grep ${ost2_svc}-osc- $LPROC/devices | awk '{print $1}'`
    [ "$osc2dev" ] || return 4
    $LCTL --device $osc2dev deactivate || return 1
    do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
    $LCTL --device $osc2dev activate || return 2
    return 0
}
run_test 41 "read from a valid osc while other oscs are invalid"

# test MDS recovery after ost failure
test_42() {
    blocks=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
    createmany -o $DIR/$tfile-%d 800
    replay_barrier ost1
    unlinkmany $DIR/$tfile-%d 0 400
    debugsave
    sysctl -w lnet.debug=-1
    facet_failover ost1

    # osc is evicted, fs is smaller (but only with failout OSTs (bug 7287)
    #blocks_after=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
    #[ $blocks_after -lt $blocks ] || return 1

    echo wait for MDS to timeout and recover
    sleep $((TIMEOUT * 2))
    debugrestore
    unlinkmany $DIR/$tfile-%d 400 400
    $CHECKSTAT -t file $DIR/$tfile-* && return 2 || true
}
run_test 42 "recovery after ost failure"

# timeout in MDS/OST recovery RPC will LBUG MDS
test_43() { # bug 2530
    replay_barrier mds

    # OBD_FAIL_OST_CREATE_NET 0x204
    do_facet ost1 "sysctl -w lustre.fail_loc=0x80000204"
    fail mds
    sleep 10
    do_facet ost1 "sysctl -w lustre.fail_loc=0"

    return 0
}
run_test 43 "mds osc import failure during recovery; don't LBUG"

test_44() {
    local at_max_saved=0

    mdcdev=`awk '/-mdc-/ {print $1}' $LPROC/devices`
    [ "$mdcdev" ] || exit 2

    # adaptive timeouts slow this way down
    if at_is_valid && at_is_enabled; then
        at_max_saved=$(at_max_get mds)
        at_max_set 40 mds
    fi

    for i in `seq 1 10`; do
        echo "$i of 10 ($(date +%s))"
        do_facet mds "grep service $LPROC/mdt/MDS/mds/timeouts"
        #define OBD_FAIL_TGT_CONN_RACE 0x701
        do_facet mds "sysctl -w lustre.fail_loc=0x80000701"
        $LCTL --device $mdcdev recover
        df $MOUNT
    done
    do_facet mds "sysctl -w lustre.fail_loc=0"
    [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
    return 0
}
run_test 44 "race in target handle connect"

test_44b() {
    mdcdev=`awk '/-mdc-/ {print $1}' $LPROC/devices`
    [ "$mdcdev" ] || exit 2
    for i in `seq 1 10`; do
        echo "$i of 10 ($(date +%s))"
        do_facet mds "grep service $LPROC/mdt/MDS/mds/timeouts"
        #define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
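        # the 0x80000000 bit OR'd into fail_loc is OBD_FAIL_ONCE: the
        # injected failure fires on the first hit only, so each loop
        # iteration re-arms it before forcing another reconnect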
        do_facet mds "sysctl -w lustre.fail_loc=0x80000704"
        $LCTL --device $mdcdev recover
        df $MOUNT
    done
    do_facet mds "sysctl -w lustre.fail_loc=0"
    return 0
}
run_test 44b "race in target handle connect"

# Handle failed close
test_45() {
    mdcdev=`awk '/-mdc-/ {print $1}' $LPROC/devices`
    [ "$mdcdev" ] || exit 2
    $LCTL --device $mdcdev recover

    multiop_bg_pause $DIR/$tfile O_c || return 1
    pid=$!

    # This will cause the CLOSE to fail before even
    # allocating a reply buffer
    $LCTL --device $mdcdev deactivate || return 4

    # try the close
    kill -USR1 $pid
    wait $pid || return 1

    $LCTL --device $mdcdev activate || return 5
    sleep 1

    $CHECKSTAT -t file $DIR/$tfile || return 2
    return 0
}
run_test 45 "Handle failed close"

test_46() {
    dmesg -c >/dev/null
    drop_reply "touch $DIR/$tfile"
    fail mds
    # ironically, the previous test, 45, will cause a real forced close,
    # so just look for one for this test
    dmesg | grep -i "force closing client file handle for $tfile" && return 1
    return 0
}
run_test 46 "Don't leak file handle after open resend (3325)"

test_47() { # bug 2824
    # create some files to make sure precreate has been done on all
    # OSTs. (just in case this test is run independently)
    createmany -o $DIR/$tfile 20 || return 1

    # OBD_FAIL_OST_CREATE_NET 0x204
    fail ost1
    do_facet ost1 "sysctl -w lustre.fail_loc=0x80000204"
    df $MOUNT || return 2

    # let the MDS discover the OST failure, attempt to recover, fail
    # and recover again.
    sleep $((3 * TIMEOUT))

    # Without 2824, this createmany would hang
    createmany -o $DIR/$tfile 20 || return 3
    unlinkmany $DIR/$tfile 20 || return 4

    do_facet ost1 "sysctl -w lustre.fail_loc=0"
    return 0
}
run_test 47 "MDS->OSC failure during precreate cleanup (2824)"

test_48() {
    replay_barrier mds
    createmany -o $DIR/$tfile 20 || return 1
    # OBD_FAIL_OST_EROFS 0x216
    fail mds
    do_facet ost1 "sysctl -w lustre.fail_loc=0x80000216"
    df $MOUNT || return 2

    createmany -o $DIR/$tfile 20 20 || return 2
    unlinkmany $DIR/$tfile 40 || return 3

    do_facet ost1 "sysctl -w lustre.fail_loc=0"
    return 0
}
run_test 48 "MDS->OSC failure during precreate cleanup (2824)"

test_50() {
    local oscdev=`do_facet mds grep \'${ost1_svc}-osc \' $LPROC/devices | awk '{print $1}' | head -1`
    [ "$oscdev" ] || return 1
    do_facet mds $LCTL --device $oscdev recover || return 2
    do_facet mds $LCTL --device $oscdev recover || return 3
    # give the mds_lov_sync threads a chance to run
    sleep 5
}
run_test 50 "Double OSC recovery, don't LASSERT (3812)"

# b3764 timed out lock replay
test_52() {
    touch $DIR/$tfile
    cancel_lru_locks mdc
    multiop $DIR/$tfile s || return 1
    replay_barrier mds
#define OBD_FAIL_LDLM_REPLY 0x30c
    do_facet mds "sysctl -w lustre.fail_loc=0x8000030c"
    fail mds || return 2
    do_facet mds "sysctl -w lustre.fail_loc=0x0"

    $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
}
run_test 52 "time out lock replay (3764)"

# bug 3462 - simultaneous MDC requests
test_53a() {
    mkdir -p $DIR/${tdir}-1
    mkdir -p $DIR/${tdir}-2
    multiop $DIR/${tdir}-1/f O_c &
    close_pid=$!
    # give multiop a chance to open
    sleep 1

    #define OBD_FAIL_MDS_CLOSE_NET 0x115
    do_facet mds "sysctl -w lustre.fail_loc=0x80000115"
    kill -USR1 $close_pid
    cancel_lru_locks mdc # force the close
    do_facet mds "sysctl -w lustre.fail_loc=0"
    mcreate $DIR/${tdir}-2/f || return 1

    # close should still be here
    [ -d /proc/$close_pid ] || return 2
    replay_barrier_nodf mds
    fail mds
    wait $close_pid || return 3

    $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
    $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
    rm -rf $DIR/${tdir}-*
}
run_test 53a "|X| close request while two MDC requests in flight"
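# A typical way to drive this suite (assuming the usual Lustre
# test-framework conventions behind the run_test calls above): run a
# single test by number with the ONLY environment variable, e.g.
#   ONLY=53a sh replay-single.sh
# or skip known-broken tests by listing their numbers in ALWAYS_EXCEPT.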