# test.tcl
				error $theError;
			}
		}
		set is_envmethod 0
	}
}

#
# Run method tests, each in its own, new environment.  (As opposed to
# run_envmethod1 which runs all the tests in a single environment.)
#
proc run_envmethod { method {start 1} {stop 0} {display 0} {run 1} \
    {outfile stdout } { largs "" } } {
	global __debug_on
	global __debug_print
	global __debug_test
	global is_envmethod
	global num_test
	global parms
	source ./include.tcl

	set stopsdb $num_test(sdb)
	if { $stop == 0 } {
		set stop $num_test(test)
	} else {
		if { $stopsdb > $stop } {
			set stopsdb $stop
		}
	}

	set save_largs $largs
	env_cleanup $testdir

	if { $display == 1 } {
		for { set i $start } { $i <= $stop } { incr i } {
			puts $outfile "eval run_envmethod $method \
			    $i $i 0 1 stdout $largs"
		}
	}

	if { $run == 1 } {
		set is_envmethod 1
		#
		# Run both subdb and normal tests for as long as there are
		# some of each type.  Start with the subdbs:
		set stat [catch {
			for { set i $start } { $i <= $stopsdb } {incr i} {
				check_handles
				set env [eval {berkdb_env -create -txn \
				    -mode 0644 -home $testdir}]
				error_check_good env_open \
				    [is_valid_env $env] TRUE
				append largs " -env $env "

				puts "[timestamp]"
				set name [format "subdb%03d" $i]
				if { [info exists parms($name)] != 1 } {
					puts stderr \
					    "[format Subdb%03d $i] disabled in\
					    testparams.tcl; skipping."
					continue
				}
				eval $name $method $parms($name) $largs

				error_check_good envclose [$env close] 0
				error_check_good envremove [berkdb envremove \
				    -home $testdir] 0
				flush stdout
				flush stderr
				set largs $save_largs
			}
		} res]
		if { $stat != 0} {
			global errorInfo;

			set fnl [string first "\n" $errorInfo]
			set theError [string range $errorInfo 0 [expr $fnl - 1]]
			if {[string first FAIL $errorInfo] == -1} {
				error "FAIL:[timestamp]\
				    run_envmethod: $method $i: $theError"
			} else {
				error $theError;
			}
		}
		# Subdb tests are done, now run through the regular tests:
		set stat [catch {
			for { set i $start } { $i <= $stop } {incr i} {
				check_handles
				set env [eval {berkdb_env -create -txn \
				    -mode 0644 -home $testdir}]
				error_check_good env_open \
				    [is_valid_env $env] TRUE
				append largs " -env $env "

				puts "[timestamp]"
				set name [format "test%03d" $i]
				if { [info exists parms($name)] != 1 } {
					puts stderr \
					    "[format Test%03d $i] disabled in\
					    testparams.tcl; skipping."
					continue
				}
				eval $name $method $parms($name) $largs

				if { $__debug_print != 0 } {
					puts ""
				}
				if { $__debug_on != 0 } {
					debug $__debug_test
				}
				flush stdout
				flush stderr
				set largs $save_largs
				error_check_good envclose [$env close] 0
				error_check_good envremove [berkdb envremove \
				    -home $testdir] 0
			}
		} res]
		if { $stat != 0} {
			global errorInfo;

			set fnl [string first "\n" $errorInfo]
			set theError [string range $errorInfo 0 [expr $fnl - 1]]
			if {[string first FAIL $errorInfo] == -1} {
				error "FAIL:[timestamp]\
				    run_envmethod: $method $i: $theError"
			} else {
				error $theError;
			}
		}
		set is_envmethod 0
	}
}

proc subdb { method {start 1} {stop 0} {display 0} {run 1} \
    {outfile stdout} args} {
	global num_test testdir
	global parms

	for { set i $start } { $i <= $stop } {incr i} {
		set name [format "subdb%03d" $i]
		if { [info exists parms($name)] != 1 } {
			puts stderr "[format Subdb%03d $i] disabled in\
			    testparams.tcl; skipping."
			continue
		}
		if { $display } {
			puts -nonewline $outfile "eval $name $method"
			puts -nonewline $outfile " $parms($name) $args;"
			puts $outfile "verify_dir $testdir \"\" 1"
		}
		if { $run } {
			check_handles $outfile
			eval $name $method $parms($name) $args
			verify_dir $testdir "" 1
		}
		flush stdout
		flush stderr
	}
}

proc run_recd { method {start 1} {stop 0} {run 1} {display 0} args } {
	global __debug_on
	global __debug_print
	global __debug_test
	global parms
	global num_test
	global log_log_record_types
	source ./include.tcl

	if { $stop == 0 } {
		set stop $num_test(recd)
	}
	if { $run == 1 } {
		puts "run_recd: $method $start $stop $args"
	}

	if {[catch {
		for { set i $start } { $i <= $stop } {incr i} {
			set name [format "recd%03d" $i]
			if { [info exists parms($name)] != 1 } {
				puts stderr "[format Recd%03d $i] disabled in\
				    testparams.tcl; skipping."
				continue
			}
			if { $display } {
				puts "eval $name $method $parms($name) $args"
			}
			if { $run } {
				check_handles
				puts "[timestamp]"
				# By redirecting stdout to stdout, we make exec
				# print output rather than simply returning it.
				# By redirecting stderr to stdout too, we make
				# sure everything winds up in the ALL.OUT file.
				set ret [catch { exec $tclsh_path << \
				    "source $test_path/test.tcl; \
				    set log_log_record_types \
				    $log_log_record_types; eval $name \
				    $method $parms($name) $args" \
				    >&@ stdout } res]

				# Don't die if the test failed; we want
				# to just proceed.
				if { $ret != 0 } {
					puts "FAIL:[timestamp] $res"
				}

				if { $__debug_print != 0 } {
					puts ""
				}
				if { $__debug_on != 0 } {
					debug $__debug_test
				}
				flush stdout
				flush stderr
			}
		}
	} res] != 0} {
		global errorInfo;

		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_recd: $method $i: $theError"
		} else {
			error $theError;
		}
	}
}

proc run_recds { {run 1} {display 0} args } {
	global log_log_record_types

	set log_log_record_types 1
	logtrack_init
	foreach method \
	    "btree rbtree hash queue queueext recno frecno rrecno" {
		check_handles
		if { [catch {eval \
		    run_recd -$method 1 0 $run $display $args} ret ] != 0 } {
			puts $ret
		}
	}
	if { $run } {
		logtrack_summary
	}
	set log_log_record_types 0
}

proc run_all { args } {
	global num_test
	source ./include.tcl

	fileremove -f ALL.OUT

	set exflgs [eval extractflags $args]
	set flags [lindex $exflgs 1]
	set display 1
	set run 1
	set am_only 0
	set parallel 0
	set nparalleltests 0
	set rflags {--}
	foreach f $flags {
		switch $f {
			m {
				set am_only 1
			}
			n {
				set display 1
				set run 0
				set rflags [linsert $rflags 0 "-n"]
			}
		}
	}

	set o [open ALL.OUT a]
	if { $run == 1 } {
		puts -nonewline "Test suite run started at: "
		puts [clock format [clock seconds] -format "%H:%M %D"]
		puts [berkdb version -string]

		puts -nonewline $o "Test suite run started at: "
		puts $o [clock format [clock seconds] -format "%H:%M %D"]
		puts $o [berkdb version -string]
	}
	close $o
	#
	# First run standard tests.  Send in a -A to let run_std know
	# that it is part of the "run_all" run, so that it doesn't
	# print out start/end times.
	#
	lappend args -A
	eval {run_std} $args

	set test_pagesizes [get_test_pagesizes]
	set args [lindex $exflgs 0]
	set save_args $args

	foreach pgsz $test_pagesizes {
		set args $save_args
		append args " -pagesize $pgsz -chksum"
		if { $am_only == 0 } {
			# Run recovery tests.
			#
			# XXX These don't actually work at multiple pagesizes;
			# disable them for now.
			#
			# XXX These too are broken into separate tclsh
			# instantiations so we don't require so much
			# memory, but I think it's cleaner
			# and more useful to do it down inside proc r than here,
			# since "r recd" gets done a lot and needs to work.
			#
			# XXX See comment in run_std for why this only directs
			# stdout and not stderr.  Don't worry--the right stuff
			# happens.
			#puts "Running recovery tests with pagesize $pgsz"
			#if [catch {exec $tclsh_path \
			#    << "source $test_path/test.tcl; \
			#    r $rflags recd $args" \
			#    2>@ stderr >> ALL.OUT } res] {
			#	set o [open ALL.OUT a]
			#	puts $o "FAIL: recd test:"
			#	puts $o $res
			#	close $o
			#}
		}

		# Access method tests.
		#
		# XXX
		# Broken up into separate tclsh instantiations so
		# we don't require so much memory.
		foreach i \
		    "btree rbtree hash queue queueext recno frecno rrecno" {
			puts "Running $i tests with pagesize $pgsz"
			for { set j 1 } { $j <= $num_test(test) } {incr j} {
				if { $run == 0 } {
					set o [open ALL.OUT a]
					eval {run_method -$i $j $j $display \
					    $run $o} $args
					close $o
				}
				if { $run } {
					if [catch {exec $tclsh_path \
					    << "source $test_path/test.tcl; \
					    eval {run_method -$i $j $j \
					    $display $run stdout} $args" \
					    >>& ALL.OUT } res] {
						set o [open ALL.OUT a]
						puts $o \
						    "FAIL: [format \
						    "test%03d" $j] $i"
						close $o
					}
				}
			}
			#
			# Run subdb tests with varying pagesizes too.
			#
			for { set j 1 } { $j <= $num_test(sdb) } {incr j} {
				if { $run == 0 } {
					set o [open ALL.OUT a]
					eval {subdb -$i $j $j $display \
					    $run $o} $args
					close $o
				}
				if { $run == 1 } {
					if [catch {exec $tclsh_path \
					    << "source $test_path/test.tcl; \
					    eval {subdb -$i $j $j $display \
					    $run stdout} $args" \
					    >>& ALL.OUT } res] {
						set o [open ALL.OUT a]
						puts $o "FAIL: subdb -$i $j $j"
						close $o
					}
				}
			}
		}
	}
	set args $save_args

	#
	# Run access method tests at default page size in one env.
	#
	foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
		puts "Running $i tests in a txn env"
		for { set j 1 } { $j <= $num_test(test) } { incr j } {
			if { $run == 0 } {
				set o [open ALL.OUT a]
				run_envmethod -$i $j $j $display \
				    $run $o $args
				close $o
			}
			if { $run } {
				if [catch {exec $tclsh_path \
				    << "source $test_path/test.tcl; \
				    run_envmethod -$i $j $j \
				    $display $run stdout $args" \
				    >>& ALL.OUT } res] {
					set o [open ALL.OUT a]
					puts $o \
					    "FAIL: run_envmethod $i $j $j"
					close $o
				}
			}
		}
	}

	#
	# Run tests using proc r.  The replication tests have been
	# moved from run_std to run_all.
	#
	set test_list { {"replication" "rep"} {"security" "sec"} }
	#
	# If configured for RPC, then run rpc tests too.
	#
	if { [file exists ./berkeley_db_svc] ||
	    [file exists ./berkeley_db_cxxsvc] ||
	    [file exists ./berkeley_db_javasvc] } {
		append test_list {{"RPC" "rpc"}}
	}

	foreach pair $test_list {
		set msg [lindex $pair 0]
		set cmd [lindex $pair 1]
		puts "Running $msg tests"
		if [catch {exec $tclsh_path \
		    << "source $test_path/test.tcl; \
		    r $rflags $cmd $args" >>& ALL.OUT } res] {
			set o [open ALL.OUT a]
			puts $o "FAIL: $cmd test"
			close $o
		}
	}

	# If not actually running, no need to check for failure.
	if { $run == 0 } {
		return
	}

	set failed 0
	set o [open ALL.OUT r]
	while { [gets $o line] >= 0 } {
		if { [regexp {^FAIL} $line] != 0 } {
			set failed 1
		}
	}
	close $o
	set o [open ALL.OUT a]
	if { $failed == 0 } {
		puts "Regression Tests Succeeded"
		puts $o "Regression Tests Succeeded"
	} else {
		puts "Regression Tests Failed; see ALL.OUT for log"
		puts $o "Regression Tests Failed"
	}

	puts -nonewline "Test suite run completed at: "
	puts [clock format [clock seconds] -format "%H:%M %D"]
	puts -nonewline $o "Test suite run completed at: "
	puts $o [clock format [clock seconds] -format "%H:%M %D"]
	close $o
}

#
# Run method tests in one environment.  (As opposed to run_envmethod
# which runs each test in its own, new environment.)
#
proc run_envmethod1 { method {start 1} {stop 0} {display 0} {run 1} \
    { outfile stdout } args } {
	global __debug_on
	global __debug_print
	global __debug_test
	global is_envmethod
	global num_test
	global parms
	source ./include.tcl

	set stopsdb $num_test(sdb)
	if { $stop == 0 } {
		set stop $num_test(test)
	} else {
		if { $stopsdb > $stop } {
			set stopsdb $stop
		}
	}
	if { $run == 1 } {
		puts "run_envmethod1: $method $start $stop $args"
	}

	set is_envmethod 1
	if { $run == 1 } {
		check_handles
		env_cleanup $testdir
		error_check_good envremove [berkdb envremove -home $testdir] 0
		set env [eval {berkdb_env -create -cachesize {0 10000000 0}} \
		    {-mode 0644 -home $testdir}]
		error_check_good env_open [is_valid_env $env] TRUE
		append largs " -env $env "
	}
	if { $display } {
		# The envmethod1 tests can't be split up, since they share
		# an env.
		puts $outfile "eval run_envmethod1 $method $args"
	}
	set stat [catch {
		for { set i $start } { $i <= $stopsdb } {incr i} {
			set name [format "subdb%03d" $i]
			if { [info exists parms($name)] != 1 } {
				puts stderr "[format Subdb%03d $i] disabled in\
				    testparams.tcl; skipping."
				continue
			}
			if { $run } {
				puts $outfile "[timestamp]"
				eval $name $method $parms($name) $largs
				if { $__debug_print != 0 } {
					puts $outfile ""
				}
				if { $__debug_on != 0 } {
					debug $__debug_test
				}
			}
			flush stdout
			flush stderr
		}
	} res]
	if { $stat != 0} {
		global errorInfo;

		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_envmethod: $method $i: $theError"
		} else {
			error $theError;
		}
	}
	set stat [catch {
		for { set i $start } { $i <= $stop } {incr i} {
			set name [format "test%03d" $i]
			if { [info exists parms($name)] != 1 } {
				puts stderr "[format Test%03d $i] disabled in\
				    testparams.tcl; skipping."
				continue
			}
			if { $run } {
				puts $outfile "[timestamp]"
				eval $name $method $parms($name) $largs
				if { $__debug_print != 0 } {
					puts $outfile ""
				}
				if { $__debug_on != 0 } {
					debug $__debug_test
				}
			}
			flush stdout
			flush stderr
		}
	} res]
	if { $stat != 0} {
		global errorInfo;

		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_envmethod1: $method $i: $theError"
		} else {
			error $theError;
		}
	}
	if { $run == 1 } {
		error_check_good envclose [$env close] 0
		check_handles $outfile
	}
	set is_envmethod 0
}

# We want to test all of 512b, 8Kb, and 32Kb pages, but chances are one
# of these is the default pagesize.  We don't want to run all the AM tests
# twice, so figure out what the default page size is, then return the
# other two.
proc get_test_pagesizes { } {
	# Create an in-memory database.
	set db [berkdb_open -create -btree]
	error_check_good gtp_create [is_valid_db $db] TRUE
	set statret [$db stat]
	set pgsz 0
	foreach pair $statret {
		set fld [lindex $pair 0]
		if { [string compare $fld {Page size}] == 0 } {
			set pgsz [lindex $pair 1]
		}
	}

	error_check_good gtp_close [$db close] 0
	error_check_bad gtp_pgsz $pgsz 0

	switch $pgsz {
		512 { return {8192 32768} }
		8192 { return {512 32768} }
		32768 { return {512 8192} }
		default { return {512 8192 32768} }
	}
	error_check_good NOTREACHED 0 1
}
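
#
# Usage sketch: the calls below illustrate how the drivers above are
# typically invoked from a tclsh that has sourced this file.  The method
# names and test ranges are illustrative assumptions only; actual runs
# depend on the parms() and num_test() settings in testparams.tcl and on
# the build-directory layout that include.tcl expects.
#
#	% source ./test.tcl
#	% run_envmethod -btree		;# each btree test in its own txn env
#	% run_envmethod1 -hash 1 3	;# tests 1-3 for hash in one shared env
#	% run_recds			;# recovery tests for every access method
#	% run_all			;# full regression run, logged to ALL.OUT
#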