📄 fold_alanin.conf
# configuration for replica exchange scripts

# run simulation:  tclsh ../replica_exchange.tcl fold_alanin.conf
# to continue:     tclsh ../replica_exchange.tcl restart_1.conf
# view in VMD:     source fold_alanin.conf; source ../show_replicas.vmd
# add continued:   source restart_1.conf; source ../show_replicas.vmd
# show both:       vmd -e load_all.vmd

set num_replicas 8
set min_temp 300
set max_temp 600
set steps_per_run 1000
set num_runs 10000
# num_runs should be divisible by runs_per_frame * frames_per_restart
set runs_per_frame 10
set frames_per_restart 10
set namd_config_file "alanin_base.namd"
set output_root "output/fold_alanin" ; # directory must exist

# the following used only by show_replicas.vmd
set psf_file "alanin.psf"
set initial_pdb_file "unfolded.pdb"
set fit_pdb_file "alanin.pdb"

set namd_bin_dir /Projects/namd2/bin/current/Linux64
set server_port 3177

# Uncomment and/or modify a spawn command below based on your environment
# NOTE: Running namd2 through charmrun interferes with socket connections;
# run the namd2 binary directly (in standalone mode).  MPI might work.

# spread jobs across a given set of machines
set spawn_namd_command \
    [list spawn_namd_ssh "cd [pwd]; [file join $namd_bin_dir namd2] +netpoll" \
        [list beirut belfast] ]

# run jobs on local machine
# set spawn_namd_command \
#     [list spawn_namd_simple "[file join $namd_bin_dir namd2] +netpoll"]

# spread jobs across machines assigned by queueing system
# set spawn_namd_command \
#     [list spawn_namd_ssh "cd [pwd]; [file join $namd_bin_dir namd2] +netpoll" \
#         $env(LSB_HOSTS) ]

# spread jobs across machines assigned by queueing system
# set spawn_namd_command \
#     [list spawn_namd_ssh "cd [pwd]; [file join $namd_bin_dir namd2] +netpoll" \
#         [read [open $env(HOST_FILE) "r"]] ]

# run parallel jobs - this may or may not work
# set spawn_namd_command \
#     [list spawn_namd_parallel \
#         "cd [pwd]; $namd_bin_dir/charmrun $namd_bin_dir/namd2 +netpoll" \
#         $env(LSB_HOSTS) 1 ]
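
Two of the settings above interact in ways worth checking before launching a long run: the driver script distributes num_replicas temperatures between min_temp and max_temp, and num_runs must be divisible by runs_per_frame * frames_per_restart. The following is a minimal, illustrative Tcl sketch (not part of the distributed scripts) that prints a geometric temperature ladder, a common choice for temperature replica exchange, and verifies the divisibility constraint; the actual temperature assignment is performed by ../replica_exchange.tcl and may differ.

# sketch.tcl -- illustrative only; assumes a geometric temperature ladder
set num_replicas 8
set min_temp 300
set max_temp 600
set num_runs 10000
set runs_per_frame 10
set frames_per_restart 10

# geometric spacing: equal temperature ratio between neighboring replicas
for {set i 0} {$i < $num_replicas} {incr i} {
    set t [expr {$min_temp * pow(double($max_temp)/$min_temp,
                                 double($i)/($num_replicas - 1))}]
    puts [format "replica %d: %.1f K" $i $t]
}

# enforce the comment in fold_alanin.conf:
# num_runs should be divisible by runs_per_frame * frames_per_restart
set block [expr {$runs_per_frame * $frames_per_restart}]
if {$num_runs % $block != 0} {
    error "num_runs ($num_runs) is not divisible by runs_per_frame * frames_per_restart ($block)"
}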