📄 testsmoke.pm
字号:
# NOTE(review): this region begins inside a sub whose opening line is above
# the visible region; the statements below run one iteration of the smoke:
# start the server, run each test via t/TEST, collect logs/core files,
# then stop the server.
#warning "$self->{base_command} @$tests";
#$SIG{PIPE} = 'IGNORE';
$SIG{PIPE} = sub { die "pipe broke" };

# start server
{
    my $command = $self->{start_command};
    my $log = '';
    IPC::Run3::run3($command, undef, \$log, \$log);
    my $started_ok = ($log =~ /started/) ? 1 : 0;
    unless ($started_ok) {
        error "failed to start server\n $log";
        exit 1;
    }
}

my $t_logs = $self->{test_config}->{vars}->{t_logs};
my @log_files = map { catfile $t_logs, $_ } qw(error_log access_log);
$self->logs_init(@log_files);

# run tests
{
    my $command = $self->{run_command};

    # find the longest test name so the ok/FAILED column lines up
    my $max_len = 1;
    for my $test (@$tests) {
        $max_len = length $test if length $test > $max_len;
    }

    for my $test (@$tests) {
        (my $test_name = $test) =~ s/\.t$//;
        my $fill = "." x ($max_len - length $test_name);
        $self->{total_tests_run}++;

        my $test_command = "$command $test";
        my $log = '';
        IPC::Run3::run3($test_command, undef, \$log, \$log);
        my $ok = ($log =~ /All tests successful/) ? 1 : 0;

        my @core_files_msg = $self->Apache::TestRun::scan_core_incremental(1);

        # if the test has caused core file(s) it's not ok
        $ok = 0 if @core_files_msg;

        if ($ok) {
            push @$ra_ok, $test;
            if ($self->{verbose}) {
                print STDERR "$test_name${fill}ok\n";
            }
            # need to run log_diff to reset the position of the fh
            my %log_diffs = map { $_ => $self->log_diff($_) } @log_files;
        }
        else {
            push @$ra_nok, $test;
            $bad = $test;
            if ($self->{verbose}) {
                print STDERR "$test_name${fill}FAILED\n";
                error sep("-");
                # give server some time to finish the logging. it's ok to
                # wait long time since we have to deal with an error
                sleep 5;
                my %log_diffs = map { $_ => $self->log_diff($_) } @log_files;

                # client log
                error "\t\t*** run log ***";
                $log =~ s/^/ /mg;
                print STDERR "$log\n";

                # server logs
                for my $path (@log_files) {
                    next unless length $log_diffs{$path};
                    error "\t\t*** $path ***";
                    $log_diffs{$path} =~ s/^/ /mg;
                    print STDERR "$log_diffs{$path}\n";
                }
            }

            if (@core_files_msg) {
                unless ($self->{verbose}) {
                    # currently the output of 'run log' already
                    # includes the information about core files once
                    # Test::Harness::Straps allows us to run callbacks
                    # after each test, and we move back to run all
                    # tests at once, we will log the message here
                    error "$test_name caused core";
                    print STDERR join "\n", @core_files_msg, "\n";
                }
            }

            if ($self->{verbose}) {
                error sep("-");
            }

            unless ($self->{bug_mode}) {
                # normal smoke stop the run, but in the bug_mode
                # we want to complete all the tests
                last;
            }
        }
    }
}

$self->logs_end();

# stop server
$self->kill_proc();

if ($self->{bug_mode}) {
    warning sep("-");
    if (@$ra_nok == 0) {
        printf STDERR "All tests successful (%d)\n", scalar @$ra_ok;
    }
    else {
        error sprintf "error running %d tests out of %d\n",
            scalar(@$ra_nok), scalar @$ra_ok + @$ra_nok;
    }
}
else {
    return $bad;
}
}

# open the report file (named after the start time unless -report was
# given) and write its header, recording the filehandle in $self->{fh}
sub report_start {
    my($self) = shift;

    my $time = scalar localtime;
    $self->{start_time} = $time;
    $time =~ s/\s/_/g;
    $time =~ s/:/-/g; # winFU
    my $file = $self->{opts}->{report} ||
        catfile Apache::Test::vars('top_dir'), "smoke-report-$time.txt";

    $self->{runtime}->{report} = $file;
    info "Report file: $file";

    # three-arg open: the filename can't be misread as a mode
    open my $fh, '>', $file or die "cannot open $file for writing: $!";
    $self->{fh} = $fh;

    my $sep = sep("-");
    my $title = sep('=', "Special Tests Sequence Failure Finder Report");
    print $fh <<EOM;
$title
$sep
First iteration used:
$self->{base_command} @{$self->{tests}}
$sep
EOM
}

# log a successfully reduced failing sequence to STDOUT and, when the
# report file is open, to the report as well
sub report_success {
    my($self, $iter, $reduce_iter, $sequence, $tests) = @_;

    my @report = ("iteration $iter ($tests tests):\n",
        "\t$sequence\n",
        "(made $reduce_iter successful reductions)\n\n");

    print @report;
    if (my $fh = $self->{fh}) {
        print $fh @report;
    }
}

# write the summary section (status, stats, build parameters) and close
# the report file; no-op if report_start never opened one
sub report_finish {
    my($self) = @_;

    my $start_time = $self->{start_time};
    my $end_time = scalar localtime;

    if (my $fh = delete $self->{fh}) {
        my $failures = scalar keys %{ $self->{results} };
        my $sep = sep("-");
        my $cfg_as_string = $self->build_config_as_string;
        my $unique_seqs = scalar keys %{ $self->{results} };
        my $attempts = $self->{total_reduction_attempts};
        my $successes = $self->{total_reduction_successes};

        my $completion = $self->{smoking_completed}
            ? "Completed"
            : "Not Completed (aborted by user)";

        my $status = "Unknown";
        if ($self->{total_iterations} > 0) {
            if ($failures) {
                $status = "*** NOT OK ***";
            }
            else {
                $status = "+++ OK +++";
            }
        }

        my $title = sep('=', "Summary");
        my $iter_made = sprintf "Iterations (%s) made : %d",
            $self->{order}, $self->{total_iterations};

        print $fh <<EOM;
$title
Completion : $completion
Status : $status
Tests run : $self->{total_tests_run}
$iter_made
EOM

        # reduction stats only make sense when reductions were attempted
        if ($attempts > 0 && $failures) {
            my $reduction_stats = sprintf "%d/%d (%d%% success)",
                $attempts, $successes, $successes / $attempts * 100;

            print $fh <<EOM;
Unique sequences found : $unique_seqs
Reduction tries/success : $reduction_stats
EOM
        }

        print $fh <<EOM;
$sep
--- Started at: $start_time ---
--- Ended at: $end_time ---
$sep
The smoke testing was run on the system with the following
parameters:
$cfg_as_string
-- this report was generated by $0
EOM

        close $fh;
    }
}

# in case the smoke gets killed before it had a chance to finish and
# write the report, at least we won't lose the last successful reduction
# XXX: this wasn't needed before we switched to IPC::Run3, since
# Ctrl-C would log the collected data, but it doesn't work with
# IPC::Run3. So if that gets fixed, we can remove that function
sub log_successful_reduction {
    my($self, $iter, $tests) = @_;

    my $file = $self->{runtime}->{report} . ".$iter.temp";
    debug "saving in $file";
    open my $fh, '>', $file or die "cannot open $file for writing: $!";
    print $fh join " ", @$tests;
    close $fh;
}

# stringified dump of the test configuration, for the report footer
sub build_config_as_string {
    Apache::TestConfig::as_string();
}

# stop the server via the configured stop command and verify it reports
# a shutdown; logs an error (but does not die) on failure
sub kill_proc {
    my($self) = @_;

    my $command = $self->{stop_command};
    my $log = '';
    require IPC::Run3;
    IPC::Run3::run3($command, undef, \$log, \$log);

    my $stopped_ok = ($log =~ /shutdown/) ? 1 : 0;
    unless ($stopped_ok) {
        error "failed to stop server\n $log";
    }
}

# print the usage message built from the package-level %usage table
sub opt_help {
    my $self = shift;

    print <<EOM;
usage: t/SMOKE [options ...] [tests]
where the options are:
EOM
    for (sort keys %usage) {
        printf " -%-16s %s\n", $_, $usage{$_};
    }
    print <<EOM;
if 'tests' argument is not provided all available tests will be run
EOM
}

# generate t/SMOKE script (or a different filename) which will drive
# Apache::TestSmoke
sub generate_script {
    my ($class, $file) = @_;

    $file ||= catfile 't', 'SMOKE';

    my $content = join "\n",
        "BEGIN { eval { require blib && blib->import; } }",
        Apache::TestConfig->perlscript_header,
        "use $class;",
        "$class->new(\@ARGV)->run;";

    Apache::Test::basic_config()->write_perlscript($file, $content);
}

1;
__END__

=head1 NAME

Apache::TestSmoke - Special Tests Sequence Failure Finder

=head1 SYNOPSIS

  # get the usage and the default values
  % t/SMOKE -help

  # repeat all tests 5 times and try 20 random iterations
  # and save the report into the file 'myreport'
  % t/SMOKE -times=5 -iterations=20 -report=myreport

  # run all tests default number of iterations, and repeat tests
  # default number of times
  % t/SMOKE

  # same as above but work only the specified tests
  % t/SMOKE foo/bar foo/tar

  # run once a sequence of tests in a non-random mode
  # e.g. when trying to reduce a known long sequence that fails
  % t/SMOKE -order=rotate -times=1 foo/bar foo/tar

  # show me each currently running test
  # it's not the same as running the tests in the verbose mode
  % t/SMOKE -verbose

  # run t/TEST, but show any problems after *each* tests is run
  # useful for bug reports (it actually runs t/TEST -start, then
  # t/TEST -run for each test separately and finally t/TEST -stop
  % t/SMOKE -bug_mode

  # now read the created report file

=head1 DESCRIPTION

=head2 The Problem

When we try to test a stateless machine (i.e. all tests are
independent), running all tests once ensures that all tested things
properly work. However when a state machine is tested (i.e. where a
run of one test may influence another test) it's not enough to run all
the tests once to know that the tested features actually work. It's
quite possible that if the same tests are run in a different order
and/or repeated a few times, some tests may fail. This usually
happens when some tests don't restore the system under test to its
pristine state at the end of the run, which may influence other tests
which rely on the fact that they start on pristine state, when in fact
it's not true anymore. In fact it's possible that a single test may
fail when run twice or three times in a sequence.

=head2 The Solution

To reduce the possibility of such dependency errors, it's helpful to
run random testing repeated many times with many different srand
seeds. Of course if no failures get spotted that doesn't mean that
there are no tests inter-dependencies, which may cause a failure in
production. But random testing definitely helps to spot many problems
and can give better test coverage.

=head2 Resolving Sequence Problems

When this kind of testing is used and a failure is detected there are
two problems:

=over

=item 1

First is to be able to reproduce the problem so if we think we fixed
it, we could verify the fix. This one is easy, just remember the
sequence of tests run till the failed test and rerun the same sequence
once again after the problem has been fixed.

=item 2

Second is to be able to understand the cause of the problem. If during
the random test the failure has happened after running 400 tests, how
can we possibly know which previously running tests has caused to the
failure of the test 401. Chances are that most of the tests were clean
and don't have inter-dependency problem. Therefore it'd be very
helpful if we could reduce the long sequence to a minimum. Preferably
1 or 2 tests. That's when we can try to understand the cause of the
detected problem.

=back

This utility attempts to solve both problems, and at the end of each
iteration print a minimal sequence of tests causing to a failure. This
doesn't always succeed, but works in many cases.

This utility:

=over

=item 1

Runs the tests randomly until the first failure is detected. Or
non-randomly if the option I<-order> is set to I<repeat> or I<rotate>.

=item 2

Then it tries to reduce that sequence of tests to a minimum, and this
sequence still causes to the same failure.

=item 3

(XXX: todo): then it reruns the minimal sequence in the verbose mode
and saves the output.

=item 4

It reports all the successful reductions as it goes to STDOUT and
report file of the format: smoke-report-<date>.txt.

In addition the systems build parameters are logged into the report
file, so the detected problems could be reproduced.

=item 5

Goto 1 and run again using a new random seed, which potentially should
detect different failures.

=back

=head1 Reduction Algorithm

Currently for each reduction path, the following reduction algorithms
get applied:

=over

=item 1

Binary search: first try the upper half then the lower.

=item 2

Random window: randomize the left item, then the right item and return
the items between these two points.

=back

=head1 t/SMOKE.PL

I<t/SMOKE.PL> is driving this module, if you don't have it, create it:

  #!perl

  use strict;
  use warnings FATAL => 'all';

  use FindBin;
  use lib "$FindBin::Bin/../Apache-Test/lib";
  use lib "$FindBin::Bin/../lib";

  use Apache::TestSmoke ();

  Apache::TestSmoke->new(@ARGV)->run;

usually I<Makefile.PL> converts it into I<t/SMOKE> while adjusting the
perl path, but you create I<t/SMOKE> in first place as well.

=head1 AUTHOR

Stas Bekman

=cut
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -