CFX 作业脚本示例 (CFX job-script example)

#!/bin/bash
# SLURM batch script for an ANSYS CFX distributed-parallel solver run.
# Submit with: sbatch <this file>
# Job name shown in squeue.
#SBATCH -J cartest
# Partition (queue) to run in.
#SBATCH -p kshctest
# Total number of MPI tasks.
#SBATCH -n 120
# Number of nodes.
#SBATCH -N 4
# Tasks per node (4 nodes * 30 tasks = 120 total).
#SBATCH --ntasks-per-node=30
# Disabled (double '#'): explicit node list, kept for reference.
##SBATCH -w a01r1n[05-06],a01r2n01,a01r3n00,a01r4n00
# NOTE(review): presumably cleared to avoid Intel MPI picking up SLURM
# global task IDs and conflicting with its own rank placement — confirm.
unset SLURM_GTIDS

# NOTE(review): looks like CFX's launcher expects a PBS-style environment
# when this is set — confirm against the CFX start-method scripts.
export PBS_ENABLED=1
# X display for the CFX solver manager GUI (hard-coded login node / display).
export DISPLAY=login09:1


# --- Job configuration (site-specific paths; adjust per installation) ---
# CFX solver launcher executable.
APP_EXEC=/public/software/ANSYS/ansys2019/ansys_inc/v190/CFX/bin/cfx5solve
# Case (working) directory containing the .def file.
WORK_DIR=/public/home/liucheng/test/ansys_test/cfx_case
# Solver definition (input) file.
INPUT_FILE=cartest.def
# Results file name (currently unused below; kept for reference).
RES_FILE=cartest.res
# File capturing the solver's console output.
OUTPUT_TXT=cartest.txt

# Fail fast if the case directory is missing; the original unchecked 'cd'
# would silently leave the job running in $HOME on failure.
cd "${WORK_DIR}" || { echo "ERROR: cannot cd to ${WORK_DIR}" >&2; exit 1; }

# Number of MPI ranks granted by SLURM. BUG FIX: the original read
# $SLURM_PROCS, which SLURM never sets (always empty); the real variables
# are SLURM_NPROCS (older) / SLURM_NTASKS (current).
NP=${SLURM_NPROCS:-${SLURM_NTASKS}}

# Generate a PBS-style nodefile (one hostname per allocated slot).
if ! HOST_FILE=$(generate_pbs_nodefile); then
    echo "WARN: generate_pbs_nodefile failed; host list will be empty" >&2
fi

# Keep a copy of the raw host list in the case directory for debugging.
cat -- "${HOST_FILE}" > "${WORK_DIR}/HOST_STRING"

#######################################
# Build the CFX -par-dist host string: "node1*ppn1,node2*ppn2,...".
# Arguments: $1 - path to a PBS-style nodefile (one hostname per slot,
#                 entries for the same node on consecutive lines)
# Outputs:   host string on stdout
#######################################
build_host_string() {
    local nodefile=$1 node ppn result=""
    while IFS= read -r node; do
        # -x forces whole-line matches: the original plain 'grep' counted
        # substrings, so e.g. node "a01r1n1" also matched "a01r1n10".
        ppn=$(grep -c -x -- "${node}" "${nodefile}")
        if [ -z "${result}" ]; then
            result="${node}*${ppn}"
        else
            result="${result},${node}*${ppn}"
        fi
    done < <(uniq "${nodefile}")
    printf '%s' "${result}"
}

HOST_STRING=$(build_host_string "${HOST_FILE}")

echo "The hosts is ${HOST_STRING}" >> "${WORK_DIR}/HOST_STRING"

# --- Intel MPI fabric and tuning settings --------------------------------
export I_MPI_FABRICS=shm:dapl  # shared memory intra-node, DAPL inter-node
export I_MPI_DAPL_UD=enable  # DAPL unreliable-datagram mode (scales better at high rank counts)
export I_MPI_FALLBACK_DEVICE=disable  # fail instead of silently falling back to a slower fabric (e.g. TCP)
export I_MPI_DEBUG=0  # no MPI debug/diagnostic output
export I_MPI_PIN=disable  # disable Intel MPI process pinning (leave placement to SLURM)
export I_MPI_ADJUST_REDUCE=2  # NOTE(review): selects a specific collective algorithm variant — site tuning, confirm
export I_MPI_ADJUST_ALLREDUCE=2  # NOTE(review): site-tuned allreduce algorithm choice — confirm
export I_MPI_ADJUST_BCAST=1  # NOTE(review): site-tuned broadcast algorithm choice — confirm
export I_MPI_PLATFORM=auto  # auto-detect CPU platform for tuned defaults
export I_MPI_DAPL_SCALABLE_PROGRESS=1  # scalable progress engine for DAPL

export CFX5RSH=ssh  # have CFX start remote solver processes via ssh (not rsh)
# Launch the CFX solver:
#   -manager        : start the solver manager (GUI on $DISPLAY)
#   -double         : double-precision solver
#   -par-dist       : distributed-parallel run on the "host*nprocs,..." list built above
#   -start-method   : use the Intel MPI distributed start method
#   -def            : solver definition (input) file
# stdout+stderr are appended to ${OUTPUT_TXT} via tee.
# NOTE(review): the pipeline's exit status is tee's, not the solver's —
# a solver failure is not visible in $? here.
${APP_EXEC} -manager -display $DISPLAY -double -par-dist ${HOST_STRING} -start-method 'Intel MPI Distributed Parallel' -def ${WORK_DIR}/${INPUT_FILE} 2>&1 | tee -a ${WORK_DIR}/${OUTPUT_TXT}

# (removed: residual "results matching" site-search widget text left over
#  from the web-page export; not part of the job script)