# Bash, executable (~129 lines, 4.7 KiB)
##########################################
##              HOSTNAMES               ##
##########################################
# Submit a NAMD run on the UAB Cheaha cluster via Slurm.
# Reads from caller scope: ntasks, partition, time, mem_per_cpu, nodes,
# job, this_step, final_step_num, namd_param, conf, out.
# Side effects: writes and submits ./temp_sbatch, then removes it.
run_cheaha() {
  # Generate the batch script in one heredoc.  Unescaped ${vars} expand NOW
  # (submission time); \$-escaped vars and \`...\` are written literally and
  # expand when Slurm runs the job.
  cat > temp_sbatch <<EOF
#!/usr/bin/env bash
#SBATCH --ntasks=${ntasks}
#SBATCH --partition=${partition}
#SBATCH --time=${time}
#SBATCH --mem-per-cpu=${mem_per_cpu}
#SBATCH --nodes=${nodes}
#SBATCH --job-name=${job}_${this_step}_${final_step_num}
# Load module(s)
module load rc/NAMD/2.12
for n in \`echo \${SLURM_NODELIST} | scontrol show hostnames\`; do
  echo "host \$n ++cpus 24" >> \${TMPDIR}/nodelist.\${SLURM_JOBID}
done
PPN=\$(expr \$SLURM_NTASKS / \$SLURM_NNODES - 1)
P="\$((\$PPN * \$SLURM_NNODES))"
namd_bin="\$(which namd2)"
${namd_param} ${conf} > ${out}
rm \${TMPDIR}/nodelist.\${SLURM_JOBID}
EOF

  # submit using sbatch, then clean up the generated script
  sbatch temp_sbatch
  rm temp_sbatch
}
# Run NAMD directly on the local workstation (no scheduler).
# Reads from caller scope: namd_param (launcher command line), conf, out.
run_workstation() {
  # ${namd_param} and ${conf} are intentionally unquoted so they word-split
  # into command + arguments (was: $(echo "${namd_param}"), a useless echo).
  ${namd_param} ${conf} > "${out}"
}
# Submit a NAMD run on the ASC clusters (DMC / UV) via Slurm.
# Reads from caller scope: job, this_step, final_step_num, conf, out,
# queue_name_pref, num_cpus_pref, cpu_time_pref, memory_pref, cluster_pref.
# Side effects: writes ~/.asc_queue, consumes ./qfile (written by qquery),
# writes/submits/removes ./temp_sbatch.  Exits the script if qfile is missing.
run_asc() {
  # e.g. job="./foo.bar" -> "bar_<step>_<final>"
  job_name_pref=${job##*.}_${this_step}_${final_step_num}

  /apps/scripts/check_ssh
  # qquery writes the selected queue parameters to ./qfile
  /apps/scripts/qquery program=namd input=$conf

  # create ~/.asc_queue (read by the ASC queue-selection tooling)
  echo 'start_time_pref=SOONEST' > ${HOME}/.asc_queue
  echo "job_name_pref=${job_name_pref}" >> ${HOME}/.asc_queue
  echo "queue_name_pref=${queue_name_pref}" >> ${HOME}/.asc_queue
  echo "num_cpus_pref=${num_cpus_pref}" >> ${HOME}/.asc_queue
  echo "cpu_time_pref=${cpu_time_pref}" >> ${HOME}/.asc_queue
  echo "memory_pref=${memory_pref}" >> ${HOME}/.asc_queue
  echo "cluster_pref=${cluster_pref}" >> ${HOME}/.asc_queue

  if [ -f qfile ]
  then
    read queue time memory sttime num_cpus < qfile
    rm qfile
  else
    # BUG FIX: was a bare `exit`, which returned the status of the preceding
    # echo (0) — the failure path reported success.  Fail loudly on stderr.
    echo "ERROR: qfile not found" >&2
    echo "       Make sure you have write permissions in this directory" >&2
    exit 1
  fi

  # get limits and qos for sbatch ($() instead of legacy backticks)
  limits=$(/apps/scripts/set_limits $time $num_cpus NA $memory NA NA)
  qos=$(/apps/scripts/set_qos $queue)

  # get constraints
  constraints="--constraint=dmc|uv" # both clusters, not KNL

  # Generate the batch script.  Unescaped ${vars} expand NOW (submission
  # time); \$-escaped vars and \`...\` are written literally and expand when
  # Slurm runs the job.
  cat > temp_sbatch <<EOF
#!/usr/bin/env bash
pwd="\$(pwd)"
echo "pwd: \$pwd"
# set the workdir variable
export workdir=/scratch/${LOGNAME}/${job##*./}
echo "workdir: \$workdir"
# load modules
source /opt/asn/etc/asn-bash-profiles-special/modules.sh
module purge
if [ ! -d \$workdir ]
then
  mkdir -p \$workdir
fi
export TEMPDIR=\$workdir
export TMPDIR=\$workdir
export CONV_RSH=ssh
# run from the current directory
./cp_wild_r ${job} \$workdir
sleep 15
cd \$workdir
export uv_test=\`hostname | grep uv | wc -l\`
export dmc_test=\`hostname | grep dmc | wc -l\`
# code for the DMC
if [ \$dmc_test == "1" ]
then
  module purge
  module load namd/2.12_ibverbs
  namd_bin="\$(which namd2)"
  # run NAMD
  # NOTE(review): aliases are not expanded in non-interactive shells, so this
  # alias is likely a no-op at run time — kept verbatim; confirm on cluster.
  alias mpiexec="srun"
  charmrun +p ${num_cpus_pref} ++mpiexec \${namd_bin} ${conf##*/} > ${out##*/}
fi
# code for the UV
if [ "\$uv_test" == "1" ]
then
  module purge
  module load namd/2.12
  namd_bin="\$(which namd2)"
  # run NAMD
  charmrun +p ${num_cpus_pref} \${namd_bin} ${conf##*/} > ${out##*/}
fi
sleep 10
cd \${pwd}
./cp_wild_r \$workdir ${job}
sleep 15
rm -r \$workdir
exit 0
EOF

  # submit; qos/limits/constraints are intentionally unquoted so that
  # multi-word flag strings split into separate sbatch arguments
  sbatch $qos -J $job_name_pref --begin=$sttime --requeue $limits $constraints temp_sbatch
  rm temp_sbatch
}