
📄 testutils.tcl

📁 Tcl test utilities (testutils.tcl) from the Berkeley DB test suite: cleanup helpers for test databases, log files and environments, plus the op_recover procedures used by the transaction-recovery tests.
💻 TCL
📖 Page 1 of 5
		set maj [lindex $vers 0]
		set min [lindex $vers 1]

		# Is this machine big or little endian?  We want to mark
		# the test directories appropriately, since testing
		# little-endian databases generated by a big-endian machine,
		# and/or vice versa, is interesting.
		if { [big_endian] } {
			set myendianness be
		} else {
			set myendianness le
		}
		if { $upgrade_be == 1 } {
			set version_dir "$myendianness-$maj.${min}be"
			set en be
		} else {
			set version_dir "$myendianness-$maj.${min}le"
			set en le
		}

		set dest $upgrade_dir/$version_dir/$upgrade_method
		exec mkdir -p $dest

		set dbfiles [glob -nocomplain $dir/*.db]
		foreach dbfile $dbfiles {
			set basename [string range $dbfile \
			    [expr [string length $dir] + 1] end-3]

			set newbasename $upgrade_name-$basename

			# db_dump file
			error_check_good db_dump($dbfile) \
			    [catch {exec $util_path/db_dump -k $dbfile > \
			    $dir/$newbasename.dump}] 0

			# tcl_dump file
			upgrade_dump $dbfile \
			    $dir/$newbasename.tcldump

			# Rename dbfile and any dbq files.
			file rename $dbfile $dir/$newbasename-$en.db
			foreach dbq \
			    [glob -nocomplain $dir/__dbq.$basename.db.*] {
				set s [string length $dir/__dbq.]
				set newname [string replace $dbq $s \
				    [expr [string length $basename] + $s - 1] \
				    $newbasename-$en]
				file rename $dbq $newname
			}
			set cwd [pwd]
			cd $dir
			catch {eval exec tar -cvf $dest/$newbasename.tar \
			    [glob $newbasename* __dbq.$newbasename-$en.db.*]}
			catch {exec gzip -9v $dest/$newbasename.tar}
			cd $cwd
		}
	}

#	check_handles
	set remfiles {}
	set ret [catch { glob $dir/* } result]
	if { $ret == 0 } {
		foreach fileorig $result {
			#
			# We:
			# - Ignore any env-related files, which are
			# those that have __db.* or log.* if we are
			# running in an env.  Also ignore files whose
			# names start with REPDIR_;  these are replication
			# subdirectories.
			# - Call 'dbremove' on any databases.
			# Remove any remaining temp files.
			#
			switch -glob -- $fileorig {
			*/DIR_* -
			*/__db.* -
			*/log.* {
				if { $env != "NULL" } {
					continue
				} else {
					if { $is_qnx_test } {
						catch {berkdb envremove -force \
						    -home $dir} r
					}
					lappend remfiles $fileorig
				}
				}
			*.db	{
				set envargs ""
				set encarg ""
				#
				# If in an env, it should be open crypto
				# or not already.
				#
				if { $env != "NULL"} {
					set file [file tail $fileorig]
					set envargs " -env $env "
					if { [is_txnenv $env] } {
						append envargs " -auto_commit "
					}
				} else {
					if { $old_encrypt != 0 } {
						set encarg "-encryptany $passwd"
					}
					set file $fileorig
				}

				# If a database is left in a corrupt
				# state, dbremove might not be able to handle
				# it (it does an open before the remove).
				# Be prepared for this, and if necessary,
				# just forcibly remove the file with a warning
				# message.
				set ret [catch \
				    {eval {berkdb dbremove} $envargs $encarg \
				    $file} res]
				if { $ret != 0 } {
					# If it failed, there is a chance
					# that the previous run was using
					# encryption and we cannot know about
					# it (different tclsh instantiation).
					# Try to remove it with crypto.
					if { $env == "NULL" && \
					    $old_encrypt == 0} {
						set ret [catch \
						    {eval {berkdb dbremove} \
						    -encryptany $passwd \
						    $envargs $file} res]
					}
					if { $ret != 0 } {
						if { $quiet == 0 } {
							puts \
				    "FAIL: dbremove in cleanup failed: $res"
						}
						set file $fileorig
						lappend remfiles $file
					}
				}
				}
			default	{
				lappend remfiles $fileorig
				}
			}
		}
		if {[llength $remfiles] > 0} {
			eval fileremove -f $remfiles
		}
	}
}

proc log_cleanup { dir } {
	source ./include.tcl

	set files [glob -nocomplain $dir/log.*]
	if { [llength $files] != 0} {
		foreach f $files {
			fileremove -f $f
		}
	}
}

proc env_cleanup { dir } {
	global old_encrypt
	global passwd
	source ./include.tcl

	set encarg ""
	if { $old_encrypt != 0 } {
		set encarg "-encryptany $passwd"
	}
	set stat [catch {eval {berkdb envremove -home} $dir $encarg} ret]
	#
	# If something failed and we are left with a region entry
	# in /dev/shmem that is zero-length, the envremove will
	# succeed, and the shm_unlink will succeed, but it will not
	# remove the zero-length entry from /dev/shmem.  Remove it
	# using fileremove or else all other tests using an env
	# will immediately fail.
	#
	if { $is_qnx_test == 1 } {
		set region_files [glob -nocomplain /dev/shmem/$dir*]
		if { [llength $region_files] != 0 } {
			foreach f $region_files {
				fileremove -f $f
			}
		}
	}
	log_cleanup $dir
	cleanup $dir NULL
}

proc remote_cleanup { server dir localdir } {
	set home [file tail $dir]
	error_check_good cleanup:remove [berkdb envremove -home $home \
	    -server $server] 0
	catch {exec rsh $server rm -f $dir/*} ret
	cleanup $localdir NULL
}

proc help { cmd } {
	if { [info command $cmd] == $cmd } {
		set is_proc [lsearch [info procs $cmd] $cmd]
		if { $is_proc == -1 } {
			# Not a procedure; must be a C command
			# Let's hope that it takes some parameters
			# and that it prints out a message
			puts "Usage: [eval $cmd]"
		} else {
			# It is a tcl procedure
			puts -nonewline "Usage: $cmd"
			set args [info args $cmd]
			foreach a $args {
				set is_def [info default $cmd $a val]
				if { $is_def != 0 } {
					# Default value
					puts -nonewline " $a=$val"
				} elseif {$a == "args"} {
					# Print out flag values
					puts " options"
					args
				} else {
					# No default value
					puts -nonewline " $a"
				}
			}
			puts ""
		}
	} else {
		puts "$cmd is not a command"
	}
}

# Run a recovery test for a particular operation
# Notice that we catch the return from CP and do not do anything with it.
# This is because Solaris CP seems to exit non-zero on occasion, but
# everything else seems to run just fine.
#
# We split it into two functions so that the preparation and command
# could be executed in a different process than the recovery.
#
proc op_codeparse { encodedop op } {
	set op1 ""
	set op2 ""
	switch $encodedop {
	"abort" {
		set op1 $encodedop
		set op2 ""
	}
	"commit" {
		set op1 $encodedop
		set op2 ""
	}
	"prepare-abort" {
		set op1 "prepare"
		set op2 "abort"
	}
	"prepare-commit" {
		set op1 "prepare"
		set op2 "commit"
	}
	"prepare-discard" {
		set op1 "prepare"
		set op2 "discard"
	}
	}

	if { $op == "op" } {
		return $op1
	} else {
		return $op2
	}
}

proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
	source ./include.tcl

	set op [op_codeparse $encodedop "op"]
	set op2 [op_codeparse $encodedop "sub"]
	puts "\t$msg $encodedop"
	set gidf ""
	if { $op == "prepare" } {
		sentinel_init

		# Fork off a child to run the cmd
		# We append the gid, so start here making sure
		# we don't have old gid's around.
		set outfile $testdir/childlog
		fileremove -f $testdir/gidfile
		set gidf $testdir/gidfile
		set pidlist {}
		# puts "$tclsh_path $test_path/recdscript.tcl $testdir/recdout \
		#    $op $dir $env_cmd $dbfile $gidf $cmd"
		set p [exec $tclsh_path $test_path/wrap.tcl recdscript.tcl \
		    $testdir/recdout $op $dir $env_cmd $dbfile $gidf $cmd &]
		lappend pidlist $p
		watch_procs $pidlist 5
		set f1 [open $testdir/recdout r]
		set r [read $f1]
		puts -nonewline $r
		close $f1
		fileremove -f $testdir/recdout
	} else {
		op_recover_prep $op $dir $env_cmd $dbfile $gidf $cmd
	}
	op_recover_rec $op $op2 $dir $env_cmd $dbfile $gidf
}

proc op_recover_prep { op dir env_cmd dbfile gidf cmd } {
	global log_log_record_types
	global recd_debug
	global recd_id
	global recd_op
	source ./include.tcl

	#puts "op_recover: $op $dir $env $dbfile $cmd"

	set init_file $dir/t1
	set afterop_file $dir/t2
	set final_file $dir/t3

	# Keep track of the log types we've seen
	if { $log_log_record_types == 1} {
		logtrack_read $dir
	}

	# Save the initial file and open the environment and the file
	catch { file copy -force $dir/$dbfile $dir/$dbfile.init } res
	copy_extent_file $dir $dbfile init

	convert_encrypt $env_cmd
	set env [eval $env_cmd]
	error_check_good envopen [is_valid_env $env] TRUE

	set db [berkdb open -auto_commit -env $env $dbfile]
	error_check_good dbopen [is_valid_db $db] TRUE

	# Dump out file contents for initial case
	open_and_dump_file $dbfile $env $init_file nop \
	    dump_file_direction "-first" "-next"

	set t [$env txn]
	error_check_bad txn_begin $t NULL
	error_check_good txn_begin [is_substr $t "txn"] 1

	# Now fill in the db, tmgr, and the txnid in the command
	set exec_cmd $cmd

	set i [lsearch $cmd ENV]
	if { $i != -1 } {
		set exec_cmd [lreplace $exec_cmd $i $i $env]
	}

	set i [lsearch $cmd TXNID]
	if { $i != -1 } {
		set exec_cmd [lreplace $exec_cmd $i $i $t]
	}

	set i [lsearch $exec_cmd DB]
	if { $i != -1 } {
		set exec_cmd [lreplace $exec_cmd $i $i $db]
	}

	# To test DB_CONSUME, we need to expect a record return, not "0".
	set i [lsearch $exec_cmd "-consume"]
	if { $i != -1 } {
		set record_exec_cmd_ret 1
	} else {
		set record_exec_cmd_ret 0
	}

	# For the DB_APPEND test, we need to expect a return other than
	# 0;  set this flag to be more lenient in the error_check_good.
	set i [lsearch $exec_cmd "-append"]
	if { $i != -1 } {
		set lenient_exec_cmd_ret 1
	} else {
		set lenient_exec_cmd_ret 0
	}

	# Execute command and commit/abort it.
	set ret [eval $exec_cmd]
	if { $record_exec_cmd_ret == 1 } {
		error_check_good "\"$exec_cmd\"" [llength [lindex $ret 0]] 2
	} elseif { $lenient_exec_cmd_ret == 1 } {
		error_check_good "\"$exec_cmd\"" [expr $ret > 0] 1
	} else {
		error_check_good "\"$exec_cmd\"" $ret 0
	}
	set record_exec_cmd_ret 0
	set lenient_exec_cmd_ret 0

	# Sync the file so that we can capture a snapshot to test recovery.
	error_check_good sync:$db [$db sync] 0

	catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
	copy_extent_file $dir $dbfile afterop
	open_and_dump_file $dir/$dbfile.afterop NULL \
		$afterop_file nop dump_file_direction "-first" "-next"

	#puts "\t\t\tExecuting txn_$op:$t"
	if { $op == "prepare" } {
		set gid [make_gid global:$t]
		set gfd [open $gidf w+]
		puts $gfd $gid
		close $gfd
		error_check_good txn_$op:$t [$t $op $gid] 0
	} else {
		error_check_good txn_$op:$t [$t $op] 0
	}

	switch $op {
		"commit" { puts "\t\tCommand executed and committed." }
		"abort" { puts "\t\tCommand executed and aborted." }
		"prepare" { puts "\t\tCommand executed and prepared." }
	}

	# Sync the file so that we can capture a snapshot to test recovery.
	error_check_good sync:$db [$db sync] 0

	catch { file copy -force $dir/$dbfile $dir/$dbfile.final } res
	copy_extent_file $dir $dbfile final
	open_and_dump_file $dir/$dbfile.final NULL \
	    $final_file nop dump_file_direction "-first" "-next"

	# If this is an abort or prepare-abort, it should match the
	#   original file.
	# If this was a commit or prepare-commit, then this file should
	#   match the afterop file.
	# If this was a prepare without an abort or commit, we still
	#   have transactions active, and peering at the database from
	#   another environment will show data from uncommitted transactions.
	#   Thus we just skip this in the prepare-only case;  what
	#   we care about are the results of a prepare followed by a
	#   recovery, which we test later.
	if { $op == "commit" } {
		filesort $afterop_file $afterop_file.sort
		filesort $final_file $final_file.sort
		error_check_good \
		    diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
		    [filecmp $afterop_file.sort $final_file.sort] 0
	} elseif { $op == "abort" } {
		filesort $init_file $init_file.sort
		filesort $final_file $final_file.sort
		error_check_good \
		    diff(initial,post-$op):diff($init_file,$final_file) \
		    [filecmp $init_file.sort $final_file.sort] 0
	} else {
		# Make sure this really is one of the prepare tests
		error_check_good assert:prepare-test $op "prepare"
	}

	# Running recovery on this database should not do anything.
	# Flush all data to disk, close the environment and save the
	# file.
	# XXX DO NOT CLOSE FILE ON PREPARE -- if you are prepared,
	# you really have an active transaction and you're not allowed
	# to close files that are being acted upon by in-process
	# transactions.
	if { $op != "prepare" } {
		error_check_good close:$db [$db close] 0
	}

	#
	# If we are running 'prepare' don't close the env with an
	# active transaction.  Leave it alone so the close won't
	# quietly abort it on us.
	if { [is_substr $op "prepare"] != 1 } {
		error_check_good envclose [$env close] 0
	}
	return
}

proc op_recover_rec { op op2 dir env_cmd dbfile gidf} {
	global log_log_record_types
	global recd_debug
	global recd_id
	global recd_op
	global encrypt
	global passwd
	source ./include.tcl

	#puts "op_recover_rec: $op $op2 $dir $env_cmd $dbfile $gidf"

	set init_file $dir/t1
	set afterop_file $dir/t2
	set final_file $dir/t3

	# Keep track of the log types we've seen
	if { $log_log_record_types == 1} {
		logtrack_read $dir
	}

	berkdb debug_check
	puts -nonewline "\t\top_recover_rec: Running recovery ... "
	flush stdout

	set recargs "-h $dir -c "
	if { $encrypt > 0 } {
		append recargs " -P $passwd "
	}
	set stat [catch {eval exec $util_path/db_recover -e $recargs} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
	}
	puts -nonewline "complete ... "

	#
	# We cannot run db_recover here because that will open an env, run
	# recovery, then close it, which will abort the outstanding txns.
	# We want to do it ourselves.
	#
	set env [eval $env_cmd]
	error_check_good dbenv [is_valid_widget $env env] TRUE

	error_check_good db_verify [verify_dir $testdir "\t\t" 0 1] 0
	puts "verified"

	# If we left a txn as prepared, but not aborted or committed,
	# we need to do a txn_recover.  Make sure we have the same
	# number of txns we want.
	if { $op == "prepare"} {
		set txns [$env txn_recover]
		error_check_bad txnrecover [llength $txns] 0
		set gfd [open $gidf r]
		set origgid [read -nonewline $gfd]
		close $gfd
		set txnlist [lindex $txns 0]
		set t [lindex $txnlist 0]
		set gid [lindex $txnlist 1]
		error_check_good gidcompare $gid $origgid
		puts "\t\t\tExecuting txn_$op2:$t"
		error_check_good txn_$op2:$t [$t $op2] 0
		#
		# If we are testing discard, we do need to resolve
		# the txn, so get the list again and now abort it.
		#
		if { $op2 == "discard" } {
			set txns [$env txn_recover]
			error_check_bad txnrecover [llength $txns] 0
			set txnlist [lindex $txns 0]
			set t [lindex $txnlist 0]
			set gid [lindex $txnlist 1]
			error_check_good gidcompare $gid $origgid
			puts "\t\t\tExecuting txn_abort:$t"
			error_check_good disc_txn_abort:$t [$t abort] 0
		}
	}
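For orientation, the sketch below shows one plausible way a recovery test could drive the procedures on this page; it is not part of testutils.tcl. It assumes the usual harness globals from include.tcl ($testdir, $test_path), and the database name recd_sketch.db, the env_cmd string, and the key/data values are all hypothetical. The point it illustrates is the calling convention: the command passed to op_recover contains the literal tokens ENV, TXNID and DB, which op_recover_prep replaces with the open environment, transaction and database handles before evaluating it.

# Hypothetical usage sketch -- not part of testutils.tcl.
source ./include.tcl
source $test_path/testutils.tcl

# Start from a clean test directory using the helpers defined above.
env_cleanup $testdir

# Assumed environment command and database name (illustrative only);
# the real callers live in the recd* test scripts.
set env_cmd "berkdb_env -create -txn -home $testdir"
set dbfile recd_sketch.db

# Create the database that op_recover will snapshot and recover, since
# op_recover_prep opens it without -create.
set env [eval $env_cmd]
set db [berkdb open -create -auto_commit -env $env -btree $dbfile]
error_check_good db_close [$db close] 0
error_check_good env_close [$env close] 0

# Run one put under each resolution the harness understands; DB and
# TXNID are placeholders that op_recover_prep fills in.
foreach encodedop { abort commit prepare-abort prepare-commit prepare-discard } {
	op_recover $encodedop $testdir $env_cmd $dbfile \
	    {DB put -txn TXNID recd_key recd_data} "Recd-sketch.$encodedop"
}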
