# recovery-small.sh (fragment) — Lustre recovery test suite; code-viewer banner removed.
do_facet mds "sysctl -w lustre.fail_loc=0x80000119" touch $DIR/$tdir-2/f & sleep 1 do_facet mds "sysctl -w lustre.fail_loc=0" kill -USR1 $pid cancel_lru_locks mdc wait $pid || return 1 sleep $TIMEOUT $CHECKSTAT -t file $DIR/$tdir-1/f || return 2 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3 rm -rf $DIR/$tdir-*}run_test 21e "drop open reply while close and open are both in flight"test_21f() { mkdir -p $DIR/$tdir-1 mkdir -p $DIR/$tdir-2 multiop_bg_pause $DIR/$tdir-1/f O_c || return 1 pid=$! do_facet mds "sysctl -w lustre.fail_loc=0x80000119" touch $DIR/$tdir-2/f & sleep 1 do_facet mds "sysctl -w lustre.fail_loc=0" do_facet mds "sysctl -w lustre.fail_loc=0x80000122" kill -USR1 $pid cancel_lru_locks mdc wait $pid || return 1 do_facet mds "sysctl -w lustre.fail_loc=0" $CHECKSTAT -t file $DIR/$tdir-1/f || return 2 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3 rm -rf $DIR/$tdir-*}run_test 21f "drop both reply while close and open are both in flight"test_21g() { mkdir -p $DIR/$tdir-1 mkdir -p $DIR/$tdir-2 multiop_bg_pause $DIR/$tdir-1/f O_c || return 1 pid=$! do_facet mds "sysctl -w lustre.fail_loc=0x80000119" touch $DIR/$tdir-2/f & sleep 1 do_facet mds "sysctl -w lustre.fail_loc=0" do_facet mds "sysctl -w lustre.fail_loc=0x80000115" kill -USR1 $pid cancel_lru_locks mdc wait $pid || return 1 do_facet mds "sysctl -w lustre.fail_loc=0" $CHECKSTAT -t file $DIR/$tdir-1/f || return 2 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3 rm -rf $DIR/$tdir-*}run_test 21g "drop open reply and close request while close and open are both in flight"test_21h() { mkdir -p $DIR/$tdir-1 mkdir -p $DIR/$tdir-2 multiop_bg_pause $DIR/$tdir-1/f O_c || return 1 pid=$! do_facet mds "sysctl -w lustre.fail_loc=0x80000107" touch $DIR/$tdir-2/f & touch_pid=$! 
sleep 1 do_facet mds "sysctl -w lustre.fail_loc=0" do_facet mds "sysctl -w lustre.fail_loc=0x80000122" cancel_lru_locks mdc kill -USR1 $pid wait $pid || return 1 do_facet mds "sysctl -w lustre.fail_loc=0" wait $touch_pid || return 2 $CHECKSTAT -t file $DIR/$tdir-1/f || return 3 $CHECKSTAT -t file $DIR/$tdir-2/f || return 4 rm -rf $DIR/$tdir-*}run_test 21h "drop open request and close reply while close and open are both in flight"# bug 3462 - multiple MDC requeststest_22() { f1=$DIR/${tfile}-1 f2=$DIR/${tfile}-2 do_facet mds "sysctl -w lustre.fail_loc=0x80000115" multiop $f2 Oc & close_pid=$! sleep 1 multiop $f1 msu || return 1 cancel_lru_locks mdc do_facet mds "sysctl -w lustre.fail_loc=0" wait $close_pid || return 2 rm -rf $f2 || return 4}run_test 22 "drop close request and do mknod"test_23() { #b=4561 multiop_bg_pause $DIR/$tfile O_c || return 1 pid=$! # give a chance for open sleep 5 # try the close drop_request "kill -USR1 $pid" fail mds wait $pid || return 1 return 0}run_test 23 "client hang when close a file after mds crash"test_24() { # bug 2248 - eviction fails writeback but app doesn't see it mkdir -p $DIR/$tdir cancel_lru_locks osc multiop_bg_pause $DIR/$tdir/$tfile Owy_wyc || return 1 MULTI_PID=$! ost_evict_client kill -USR1 $MULTI_PID wait $MULTI_PID rc=$? sysctl -w lustre.fail_loc=0x0 client_reconnect [ $rc -eq 0 ] && error_ignore 5494 "multiop didn't fail fsync: rc $rc" || true}run_test 24 "fsync error (should return error)"test_26a() { # was test_26 bug 5921 - evict dead exports by pinger# this test can only run from a client on a separate node. remote_ost || { skip "local OST" && return 0; } remote_mds || { skip "local MDS" && return 0; } OST_FILE=obdfilter.${ost1_svc}.num_exports OST_EXP="`do_facet ost1 lctl get_param -n $OST_FILE`" OST_NEXP1=`echo $OST_EXP | cut -d' ' -f2` echo starting with $OST_NEXP1 OST exports# OBD_FAIL_PTLRPC_DROP_RPC 0x505 do_facet client sysctl -w lustre.fail_loc=0x505 # evictor takes up to 2.25x to evict. 
But if there's a # race to start the evictor from various obds, the loser # might have to wait for the next ping. echo Waiting for $(($TIMEOUT * 4)) secs sleep $(($TIMEOUT * 4)) OST_EXP="`do_facet ost1 lctl get_param -n $OST_FILE`" OST_NEXP2=`echo $OST_EXP | cut -d' ' -f2` echo ending with $OST_NEXP2 OST exports do_facet client sysctl -w lustre.fail_loc=0x0 [ $OST_NEXP1 -le $OST_NEXP2 ] && error "client not evicted" return 0}run_test 26a "evict dead exports"test_26b() { # bug 10140 - evict dead exports by pinger client_df zconf_mount `hostname` $MOUNT2 || error "Failed to mount $MOUNT2" MDS_FILE=mds.${mds_svc}.num_exports MDS_NEXP1="`do_facet mds lctl get_param -n $MDS_FILE | cut -d' ' -f2`" OST_FILE=obdfilter.${ost1_svc}.num_exports OST_NEXP1="`do_facet ost1 lctl get_param -n $OST_FILE | cut -d' ' -f2`" echo starting with $OST_NEXP1 OST and $MDS_NEXP1 MDS exports #force umount a client; exports should get evicted zconf_umount `hostname` $MOUNT2 -f # evictor takes PING_EVICT_TIMEOUT + 3 * PING_INTERVAL to evict. # But if there's a race to start the evictor from various obds, # the loser might have to wait for the next ping. echo Waiting for $(($TIMEOUT * 3)) secs sleep $(($TIMEOUT * 3)) OST_NEXP2="`do_facet ost1 lctl get_param -n $OST_FILE | cut -d' ' -f2`" MDS_NEXP2="`do_facet mds lctl get_param -n $MDS_FILE | cut -d' ' -f2`" echo ending with $OST_NEXP2 OST and $MDS_NEXP2 MDS exports [ $OST_NEXP1 -le $OST_NEXP2 ] && error "client not evicted from OST" [ $MDS_NEXP1 -le $MDS_NEXP2 ] && error "client not evicted from MDS" return 0}run_test 26b "evict dead exports"test_27() { remote_mds && { skip "remote MDS" && return 0; } mkdir -p $DIR/$tdir writemany -q -a $DIR/$tdir/$tfile 0 5 & CLIENT_PID=$! sleep 1 FAILURE_MODE="SOFT" facet_failover mds#define OBD_FAIL_OSC_SHUTDOWN 0x407 sysctl -w lustre.fail_loc=0x80000407 # need to wait for reconnect echo -n waiting for fail_loc while [ `sysctl -n lustre.fail_loc` -eq -2147482617 ]; do sleep 1 echo -n . 
done facet_failover mds #no crashes allowed! kill -USR1 $CLIENT_PID wait $CLIENT_PID true}run_test 27 "fail LOV while using OSC's"test_28() { # bug 6086 - error adding new clients do_facet client mcreate $MOUNT/$tfile || return 1 drop_bl_callback "chmod 0777 $MOUNT/$tfile" ||echo "evicted as expected" #define OBD_FAIL_MDS_ADD_CLIENT 0x12f do_facet mds sysctl -w lustre.fail_loc=0x8000012f # fail once (evicted), reconnect fail (fail_loc), ok df || (sleep 1; df) || (sleep 1; df) || error "reconnect failed" rm -f $MOUNT/$tfile fail mds # verify MDS last_rcvd can be loaded}run_test 28 "handle error adding new clients (bug 6086)"test_50() { mkdir -p $DIR/$tdir debugsave sysctl -w lnet.debug="-dlmtrace -ha" # put a load of file creates/writes/deletes writemany -q $DIR/$tdir/$tfile 0 5 & CLIENT_PID=$! echo writemany pid $CLIENT_PID sleep 10 FAILURE_MODE="SOFT" $LCTL mark "$TESTNAME fail mds 1" fail mds # wait for client to reconnect to MDS sleep 60 $LCTL mark "$TESTNAME fail mds 2" fail mds sleep 60 $LCTL mark "$TESTNAME fail mds 3" fail mds # client process should see no problems even though MDS went down sleep $TIMEOUT kill -USR1 $CLIENT_PID wait $CLIENT_PID rc=$? echo writemany returned $rc #these may fail because of eviction due to slow AST response. debugrestore [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true}run_test 50 "failover MDS under load"test_51() { mkdir -p $DIR/$tdir # put a load of file creates/writes/deletes writemany -q $DIR/$tdir/$tfile 0 5 & CLIENT_PID=$! sleep 1 FAILURE_MODE="SOFT" facet_failover mds # failover at various points during recovery SEQ="1 5 10 $(seq $TIMEOUT 5 $(($TIMEOUT+10)))" echo will failover at $SEQ for i in $SEQ do echo failover in $i sec sleep $i $LCTL mark "$TESTNAME fail mds $i" facet_failover mds done # client process should see no problems even though MDS went down # and recovery was interrupted sleep $TIMEOUT kill -USR1 $CLIENT_PID wait $CLIENT_PID rc=$? 
echo writemany returned $rc [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true}run_test 51 "failover MDS during recovery"test_52_guts() { do_facet client "writemany -q -a $DIR/$tdir/$tfile 300 5" & CLIENT_PID=$! echo writemany pid $CLIENT_PID sleep 10 FAILURE_MODE="SOFT" $LCTL mark "$TESTNAME fail ost $1" fail ost1 rc=0 wait $CLIENT_PID || rc=$? # active client process should see an EIO for down OST [ $rc -eq 5 ] && { echo "writemany correctly failed $rc" && return 0; } # but timing or failover setup may allow success [ $rc -eq 0 ] && { echo "writemany succeeded" && return 0; } echo "writemany returned $rc" return $rc}test_52() { mkdir -p $DIR/$tdir test_52_guts 1 rc=$? [ $rc -ne 0 ] && { return $rc; } # wait for client to reconnect to OST sleep 30 test_52_guts 2 rc=$? [ $rc -ne 0 ] && { return $rc; } sleep 30 test_52_guts 3 rc=$? client_reconnect #return $rc}run_test 52 "failover OST under load"# test of open reconstructtest_53() { touch $DIR/$tfile drop_ldlm_reply "openfile -f O_RDWR:O_CREAT -m 0755 $DIR/$tfile" ||\ return 2}run_test 53 "touch: drop rep"test_54() { zconf_mount `hostname` $MOUNT2 touch $DIR/$tfile touch $DIR2/$tfile.1 sleep 10 cat $DIR2/$tfile.missing # save transno = 0, rc != 0 into last_rcvd fail mds umount $MOUNT2 ERROR=`dmesg | egrep "(test 54|went back in time)" | tail -n1 | grep "went back in time"` [ x"$ERROR" == x ] || error "back in time occured"}run_test 54 "back in time"# bug 11330 - liblustre application death during I/O locks up OSTtest_55() { remote_ost && { skip "remote OST" && return 0; } mkdir -p $DIR/$tdir # first dd should be finished quickly dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 & DDPID=$! count=0 echo "step1: testing ......" while [ true ]; do if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi count=$[count+1] if [ $count -gt 64 ]; then error "dd should be finished!" 
fi sleep 1 done echo "(dd_pid=$DDPID, time=$count)successful" #define OBD_FAIL_OST_DROP_REQ 0x21d do_facet ost sysctl -w lustre.fail_loc=0x0000021d # second dd will be never finished dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 & DDPID=$! count=0 echo "step2: testing ......" while [ $count -le 64 ]; do dd_name="`ps x | awk '$1 == '$DDPID' { print $5 }'`" if [ -z $dd_name ]; then ls -l $DIR/$tdir echo "debug: (dd_name=$dd_name, dd_pid=$DDPID, time=$count)" error "dd shouldn't be finished!" fi count=$[count+1] sleep 1 done echo "(dd_pid=$DDPID, time=$count)successful" #Recover fail_loc and dd will finish soon do_facet ost sysctl -w lustre.fail_loc=0 count=0 echo "step3: testing ......" while [ true ]; do if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi count=$[count+1] if [ $count -gt 500 ]; then error "dd should be finished!" fi sleep 1 done echo "(dd_pid=$DDPID, time=$count)successful" rm -rf $DIR/$tdir}run_test 55 "ost_brw_read/write drops timed-out read/write request"test_56() { # b=11277#define OBD_FAIL_MDS_RESEND 0x136 touch $DIR/$tfile do_facet mds sysctl -w lustre.fail_loc=0x80000136 stat $DIR/$tfile do_facet mds sysctl -w lustre.fail_loc=0 rm -f $DIR/$tfile}run_test 56 "do not allow reconnect to busy exports"test_57_helper() { # no oscs means no client or mdt while lctl get_param osc.*.* > /dev/null 2>&1; do : # loop until proc file is removed done}test_57() { # bug 10866 test_57_helper & pid=$! sleep 1#define OBD_FAIL_LPROC_REMOVE 0xB00 sysctl -w lustre.fail_loc=0x80000B00 zconf_umount `hostname` $DIR sysctl -w lustre.fail_loc=0x80000B00 fail_abort mds kill -9 $pid sysctl -w lustre.fail_loc=0 mount_client $DIR do_facet client "df $DIR"}run_test 57 "read procfs entries causes kernel crash"test_58() { # bug 11546#define OBD_FAIL_MDC_ENQUEUE_PAUSE 0x801 touch $MOUNT/$tfile ls -la $MOUNT/$tfile sysctl -w lustre.fail_loc=0x80000801 cp $MOUNT/$tfile /dev/null & pid=$! 
sleep 1 sysctl -w lustre.fail_loc=0 drop_bl_callback rm -f $MOUNT/$tfile wait $pid do_facet client "df $DIR"}run_test 58 "Eviction in the middle of open RPC reply processing"test_59() { # bug 10589 zconf_mount `hostname` $MOUNT2 || error "Failed to mount $MOUNT2" echo $DIR2 | grep -q $MOUNT2 || error "DIR2 is not set properly: $DIR2" sysctl -w lustre.fail_loc=0x311 writes=$(LANG=C dd if=/dev/zero of=$DIR2/$tfile count=1 2>&1) [ $? = 0 ] || error "dd write failed" writes=$(echo $writes | awk -F '+' '/out/ {print $1}') sysctl -w lustre.fail_loc=0 sync zconf_umount `hostname` $MOUNT2 -f reads=$(LANG=C dd if=$DIR/$tfile of=/dev/null 2>&1) [ $? = 0 ] || error "dd read failed" reads=$(echo $reads | awk -F '+' '/in/ {print $1}') [ "$reads" -eq "$writes" ] || error "read" $reads "blocks, must be" $writes}run_test 59 "Read cancel race on client eviction"equals_msg `basename $0`: test complete, cleaning upcheck_and_cleanup_lustre[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG || true