# testutils.tcl
    open_and_dump_file $dir/$dbfile NULL $final_file nop \
        dump_file_direction "-first" "-next"
    if { $op == "commit" || $op2 == "commit" } {
        filesort $afterop_file $afterop_file.sort
        filesort $final_file $final_file.sort
        error_check_good \
            diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
            [filecmp $afterop_file.sort $final_file.sort] 0
    } else {
        filesort $init_file $init_file.sort
        filesort $final_file $final_file.sort
        error_check_good \
            diff(initial,post-$op):diff($init_file,$final_file) \
            [filecmp $init_file.sort $final_file.sort] 0
    }

    # Now close the environment, substitute a file that will need
    # recovery and try running recovery again.
    reset_env $env
    if { $op == "commit" || $op2 == "commit" } {
        catch { file copy -force $dir/$dbfile.init $dir/$dbfile } res
        move_file_extent $dir $dbfile init copy
    } else {
        catch { file copy -force $dir/$dbfile.afterop $dir/$dbfile } res
        move_file_extent $dir $dbfile afterop copy
    }

    berkdb debug_check
    puts -nonewline "\t\tRunning recovery on pre-op database ... "
    flush stdout

    set stat [catch {eval exec $util_path/db_recover $recargs} result]
    if { $stat == 1 } {
        error "FAIL: Recovery error: $result."
    }
    puts -nonewline "complete ... "

    error_check_good db_verify_preop [verify_dir $testdir "\t\t" 0 1] 0

    puts "verified"

    set env [eval $env_cmd]

    open_and_dump_file $dir/$dbfile NULL $final_file nop \
        dump_file_direction "-first" "-next"
    if { $op == "commit" || $op2 == "commit" } {
        filesort $final_file $final_file.sort
        filesort $afterop_file $afterop_file.sort
        error_check_good \
            diff(post-$op,recovered):diff($afterop_file,$final_file) \
            [filecmp $afterop_file.sort $final_file.sort] 0
    } else {
        filesort $init_file $init_file.sort
        filesort $final_file $final_file.sort
        error_check_good \
            diff(initial,post-$op):diff($init_file,$final_file) \
            [filecmp $init_file.sort $final_file.sort] 0
    }

    # This should just close the environment, not blow it away.
    reset_env $env
}

proc populate { db method txn n dups bigdata } {
    source ./include.tcl

    set did [open $dict]
    set count 0
    while { [gets $did str] != -1 && $count < $n } {
        if { [is_record_based $method] == 1 } {
            set key [expr $count + 1]
        } elseif { $dups == 1 } {
            set key duplicate_key
        } else {
            set key $str
        }
        if { $bigdata == 1 && [berkdb random_int 1 3] == 1} {
            set str [replicate $str 1000]
        }

        set ret [$db put -txn $txn $key $str]
        error_check_good db_put:$key $ret 0
        incr count
    }
    close $did
    return 0
}

proc big_populate { db txn n } {
    source ./include.tcl

    set did [open $dict]
    set count 0
    while { [gets $did str] != -1 && $count < $n } {
        set key [replicate $str 50]

        set ret [$db put -txn $txn $key $str]
        error_check_good db_put:$key $ret 0
        incr count
    }
    close $did
    return 0
}

proc unpopulate { db txn num } {
    source ./include.tcl

    set c [eval {$db cursor} "-txn $txn"]
    error_check_bad $db:cursor $c NULL
    error_check_good $db:cursor [is_substr $c $db] 1

    set i 0
    for {set d [$c get -first] } { [llength $d] != 0 } {
        set d [$c get -next] } {
        $c del
        incr i
        if { $num != 0 && $i >= $num } {
            break
        }
    }
    error_check_good cursor_close [$c close] 0
    return 0
}

proc reset_env { env } {
    error_check_good env_close [$env close] 0
}

proc minlocks { myenv locker_id obj_id num } {
    return [countlocks $myenv $locker_id $obj_id $num ]
}

proc maxlocks { myenv locker_id obj_id num } {
    return [countlocks $myenv $locker_id $obj_id $num ]
}

proc minwrites { myenv locker_id obj_id num } {
    return [countlocks $myenv $locker_id $obj_id $num ]
}

proc countlocks { myenv locker_id obj_id num } {
    set locklist ""
    for { set i 0} {$i < [expr $obj_id * 4]} { incr i } {
        set r [catch {$myenv lock_get read $locker_id \
            [expr $obj_id * 1000 + $i]} l ]
        if { $r != 0 } {
            puts $l
            return ERROR
        } else {
            error_check_good lockget:$obj_id [is_substr $l $myenv] 1
            lappend locklist $l
        }
    }

    # Now acquire a write lock
    if { $obj_id != 1 } {
        set r [catch {$myenv lock_get write $locker_id \
            [expr $obj_id * 1000 + 10]} l ]
        if { $r != 0 } {
            puts $l
            return ERROR
        } else {
            error_check_good lockget:$obj_id [is_substr $l $myenv] 1
            lappend locklist $l
        }
    }

    set ret [ring $myenv $locker_id $obj_id $num]

    foreach l $locklist {
        error_check_good lockput:$l [$l put] 0
    }

    return $ret
}

# This routine will let us obtain a ring of deadlocks.
# Each locker will get a lock on obj_id, then sleep, and
# then try to lock (obj_id + 1) % num.
# When the lock is finally granted, we release our locks and
# return 1 if we got both locks and DEADLOCK if we deadlocked.
# The results here should be that 1 locker deadlocks and the
# rest all finish successfully.
proc ring { myenv locker_id obj_id num } {
    source ./include.tcl

    if {[catch {$myenv lock_get write $locker_id $obj_id} lock1] != 0} {
        puts $lock1
        return ERROR
    } else {
        error_check_good lockget:$obj_id [is_substr $lock1 $myenv] 1
    }

    tclsleep 30
    set nextobj [expr ($obj_id + 1) % $num]
    set ret 1
    if {[catch {$myenv lock_get write $locker_id $nextobj} lock2] != 0} {
        if {[string match "*DEADLOCK*" $lock2] == 1} {
            set ret DEADLOCK
        } else {
            puts $lock2
            set ret ERROR
        }
    } else {
        error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
    }

    # Now release the first lock
    error_check_good lockput:$lock1 [$lock1 put] 0

    if {$ret == 1} {
        error_check_bad lockget:$obj_id $lock2 NULL
        error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
        error_check_good lockput:$lock2 [$lock2 put] 0
    }
    return $ret
}
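
# A minimal sketch (not taken from the original suite) of how ring is meant
# to be driven: each of $num lockers runs in its own process with a distinct
# obj_id in 0..num-1, so the "hold obj_id, wait for (obj_id + 1) % num"
# requests form a cycle that the deadlock detector has to break.  The
# environment handle and locker ids below are hypothetical; the calls are
# left commented out because, run serially in a single process, they would
# simply block rather than deadlock against one another.
#
#   set num 4
#   for { set i 0 } { $i < $num } { incr i } {
#       # Each iteration belongs in its own tclsh process.
#       set locker [$myenv lock_id]
#       puts "locker $i: [ring $myenv $locker $i $num]"
#   }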

# This routine will create massive deadlocks.
# Each locker will get a readlock on obj_id, then sleep, and
# then try to upgrade the readlock to a write lock.
# When the lock is finally granted, we release our first lock and
# return 1 if we got both locks and DEADLOCK if we deadlocked.
# The results here should be that 1 locker succeeds in getting all
# the locks and everyone else deadlocks.
proc clump { myenv locker_id obj_id num } {
    source ./include.tcl

    set obj_id 10
    if {[catch {$myenv lock_get read $locker_id $obj_id} lock1] != 0} {
        puts $lock1
        return ERROR
    } else {
        error_check_good lockget:$obj_id \
            [is_valid_lock $lock1 $myenv] TRUE
    }

    tclsleep 30
    set ret 1
    if {[catch {$myenv lock_get write $locker_id $obj_id} lock2] != 0} {
        if {[string match "*DEADLOCK*" $lock2] == 1} {
            set ret DEADLOCK
        } else {
            set ret ERROR
        }
    } else {
        error_check_good \
            lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
    }

    # Now release the first lock
    error_check_good lockput:$lock1 [$lock1 put] 0

    if {$ret == 1} {
        error_check_good \
            lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
        error_check_good lockput:$lock2 [$lock2 put] 0
    }
    return $ret
}

proc dead_check { t procs timeout dead clean other } {
    error_check_good $t:$procs:other $other 0
    switch $t {
        ring {
            # with timeouts the number of deadlocks is unpredictable
            if { $timeout != 0 && $dead > 1 } {
                set clean [ expr $clean + $dead - 1]
                set dead 1
            }
            error_check_good $t:$procs:deadlocks $dead 1
            error_check_good $t:$procs:success $clean \
                [expr $procs - 1]
        }
        clump {
            error_check_good $t:$procs:deadlocks $dead \
                [expr $procs - 1]
            error_check_good $t:$procs:success $clean 1
        }
        oldyoung {
            error_check_good $t:$procs:deadlocks $dead 1
            error_check_good $t:$procs:success $clean \
                [expr $procs - 1]
        }
        minlocks {
            error_check_good $t:$procs:deadlocks $dead 1
            error_check_good $t:$procs:success $clean \
                [expr $procs - 1]
        }
        maxlocks {
            error_check_good $t:$procs:deadlocks $dead 1
            error_check_good $t:$procs:success $clean \
                [expr $procs - 1]
        }
        minwrites {
            error_check_good $t:$procs:deadlocks $dead 1
            error_check_good $t:$procs:success $clean \
                [expr $procs - 1]
        }
        default {
            error "Test $t not implemented"
        }
    }
}
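
# For example, a ring run with 5 lockers and no lock timeout is expected to
# produce exactly one deadlock victim and four clean finishers, so a driver
# would check the tallies roughly like this (an illustrative call, not one
# copied from the original tests):
#
#   dead_check ring 5 0 1 4 0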

proc rdebug { id op where } {
    global recd_debug
    global recd_id
    global recd_op

    set recd_debug $where
    set recd_id $id
    set recd_op $op
}

proc rtag { msg id } {
    set tag [lindex $msg 0]
    set tail [expr [string length $tag] - 2]
    set tag [string range $tag $tail $tail]
    if { $id == $tag } {
        return 1
    } else {
        return 0
    }
}

proc zero_list { n } {
    set ret ""
    while { $n > 0 } {
        lappend ret 0
        incr n -1
    }
    return $ret
}

proc check_dump { k d } {
    puts "key: $k data: $d"
}

proc reverse { s } {
    set res ""
    for { set i 0 } { $i < [string length $s] } { incr i } {
        set res "[string index $s $i]$res"
    }
    return $res
}

#
# This is an internal-only proc.  All tests should use 'is_valid_db' etc.
#
proc is_valid_widget { w expected } {
    # First N characters must match "expected"
    set l [string length $expected]
    incr l -1
    if { [string compare [string range $w 0 $l] $expected] != 0 } {
        return $w
    }

    # Remaining characters must be digits
    incr l 1
    for { set i $l } { $i < [string length $w] } { incr i} {
        set c [string index $w $i]
        if { $c < "0" || $c > "9" } {
            return $w
        }
    }

    return TRUE
}

proc is_valid_db { db } {
    return [is_valid_widget $db db]
}

proc is_valid_env { env } {
    return [is_valid_widget $env env]
}

proc is_valid_cursor { dbc db } {
    return [is_valid_widget $dbc $db.c]
}

proc is_valid_lock { lock env } {
    return [is_valid_widget $lock $env.lock]
}

proc is_valid_logc { logc env } {
    return [is_valid_widget $logc $env.logc]
}

proc is_valid_mpool { mpool env } {
    return [is_valid_widget $mpool $env.mp]
}

proc is_valid_page { page mpool } {
    return [is_valid_widget $page $mpool.pg]
}

proc is_valid_txn { txn env } {
    return [is_valid_widget $txn $env.txn]
}

proc is_valid_mutex { m env } {
    return [is_valid_widget $m $env.mutex]
}

proc is_valid_lock {l env} {
    return [is_valid_widget $l $env.lock]
}

proc is_valid_locker {l } {
    return [is_valid_widget $l ""]
}
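
# For instance, handles in this Tcl API are named like "db0", "env0" or
# "env0.lock3", so the checks behave as follows (illustrative handle names,
# not values taken from a particular test):
#
#   is_valid_widget db0 db                 ;# returns TRUE
#   is_valid_widget env0.lock3 env0.lock   ;# returns TRUE
#   is_valid_widget foo db                 ;# returns "foo" (not a db handle)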

proc send_cmd { fd cmd {sleep 2}} {
    source ./include.tcl

    puts $fd "if \[catch {set v \[$cmd\] ; puts \$v} ret\] { \
        puts \"FAIL: \$ret\" \
        }"
    puts $fd "flush stdout"
    flush $fd
    berkdb debug_check
    tclsleep $sleep

    set r [rcv_result $fd]
    return $r
}

proc rcv_result { fd } {
    set r [gets $fd result]
    error_check_bad remote_read $r -1

    return $result
}

proc send_timed_cmd { fd rcv_too cmd } {
    set c1 "set start \[timestamp -r\]; "
    set c2 "puts \[expr \[timestamp -r\] - \$start\]"
    set full_cmd [concat $c1 $cmd ";" $c2]

    puts $fd $full_cmd
    puts $fd "flush stdout"
    flush $fd
    return 0
}

#
# The rationale behind why we have *two* "data padding" routines is outlined
# below:
#
# Both pad_data and chop_data truncate data that is too long.  However,
# pad_data also adds the pad character to pad data out to the fixed length
# record length.
#
# Which routine you call does not depend on the length of the data you're
# using, but on whether you're doing a put or a get.  When we do a put, we
# have to make sure the data isn't longer than the size of a record because
# otherwise we'll get an error (use chop_data).  When we do a get, we want to
# check that db padded everything correctly (use pad_data on the value against
# which we are comparing).
#
# We don't want to just use the pad_data routine for both purposes, because
# we want to be able to test whether or not db is padding correctly.  For
# example, the queue access method had a bug where when a record was
# overwritten (*not* a partial put), only the first n bytes of the new entry
# were written, n being the new entry's (unpadded) length.  So, if we did
# a put with key,value pair (1, "abcdef") and then a put (1, "z"), we'd get
# back (1,"zbcdef").  If we had used pad_data instead of chop_data, we would
# have gotten the "correct" result, but we wouldn't have found this bug.
proc chop_data {method data} {
    global fixed_len

    if {[is_fixed_length $method] == 1 && \
        [string length $data] > $fixed_len} {
        return [eval {binary format a$fixed_len $data}]
    } else {
        return $data
    }
}

proc pad_data {method data} {
    global fixed_len

    if {[is_fixed_length $method] == 1} {
        return [eval {binary format a$fixed_len $data}]
    } else {
        return $data
    }
}

proc make_fixed_length {method data {pad 0}} {
    global fixed_len
    global fixed_pad

    if {[is_fixed_length $method] == 1} {
        if {[string length $data] > $fixed_len } {
            error_check_bad make_fixed_len:TOO_LONG 1 1
        }
        while { [string length $data] < $fixed_len } {
            set data [format $data%c $fixed_pad]
        }
    }
    return $data
}

proc make_gid {data} {
    while { [string length $data] < 127 } {
        set data [format ${data}0]
    }
    return $data
}

proc make_gid {data} {
    while { [string length $data] < 128 } {
        set data [format ${data}0]
    }
    return $data
}

# Shift data for partial puts.
# Pad with fixed pad (which is NULL).
proc partial_shift { data offset direction} {
    global fixed_len

    set len [expr $fixed_len - 1]
    if { [string compare $direction "right"] == 0 } {
        for { set i 1} { $i <= $offset } {incr i} {
            set data [binary format x1a$len $data]
        }
    } elseif { [string compare $direction "left"] == 0 } {
        for { set i 1} { $i <= $offset } {incr i} {
            set data [string range $data 1 end]
            set data [binary format a$len $data]
        }
    }
    return $data
}

# string compare does not always work to compare this data, nor does
# expr (==), so this is a specialized comparison routine (for use in
# fixed-length recno and queue).  Returns 0 if the two values match
# and 1 if they differ.
proc binary_compare { data1 data2 } {
    if { [string length $data1] != [string length $data2] || \
        [string compare -length \
        [string length $data1] $data1 $data2] != 0 } {
        return 1
    } else {
        return 0
    }
}
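
# The put/get pattern the padding rationale above describes, as a hedged
# sketch (the database handle, key, method and $fixed_len setup are assumed
# here, not taken from a specific test in this file):
#
#   # On the way in, truncate so a fixed-length put cannot fail.
#   error_check_good put:$key \
#       [$db put $key [chop_data $method $data]] 0
#
#   # On the way out, compare against a value we padded ourselves, so a
#   # padding bug in the access method shows up as a mismatch.
#   set pair [lindex [$db get $key] 0]
#   error_check_good get:$key \
#       [binary_compare [lindex $pair 1] [pad_data $method $data]] 0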