diff --git a/Abinit/compile.sh b/Abinit/compile.sh
index 925cdacb93988a088706c5332c00d33640bba0f3..50a83705c8011ffa920e017573a87813ec3b37f0 100644
--- a/Abinit/compile.sh
+++ b/Abinit/compile.sh
@@ -1,7 +1,9 @@
-export ABINIT_ROOT="$SCRATCHDIR/Abinit"
-export ETSFIO_ROOT="$SCRATCHDIR/Etsfio"
-export XC_ROOT="$SCRATCHDIR/LibXC"
-./configure --prefix=$ABINIT_ROOT CC=mpicc CXX=mpiCC FC=mpif90 F77=mpif77 --enable-mpi=yes --enable-openmp=yes --with-linalg-flavor=mkl+scalapack --with-linalg-libs="-lscalapack -L$MKL_LIBS -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core" --with-math-flavor=gsl --with-dft-flavor=libxc --with-libxc-incs=-I$XC_ROOT/include --with-libxc-libs="-lxcf90 -lxc" --with-fft-flavor=fftw3-mkl --with-trio-flavor=netcdf --with-etsf-io-incs=-I$ETSFIO_ROOT/include --with-etsf-io-libs="-letsf_io_low_level -letsf_io_utils -letsf_io" --with-netcdf-incs=-I. --with-netcdf-libs="-lnetcdff -lnetcdf"
+#!/bin/bash
 
-make -j 20
-make -j 20 install
+export CPATH=${MKLROOT}/include:$CPATH
+export FCFLAGS_OPENMP="-fopenmp"
+export FCFLAGS=' -extend-source -noaltparam -nofpscomp -fopenmp'
+env CC="mpicc -fopenmp -fPIC" CXX="mpiCC -fopenmp -fPIC" F77="mpif90 -fopenmp -fPIC" F90="mpif90 -fopenmp -fPIC" FC="mpif90 -fopenmp -fPIC" ./configure --disable-debug --enable-mpi --disable-xml --with-linalg-flavor="mkl+scalapack" --with-linalg-libs="-L${MKLROOT}/lib/intel64 -lmkl_scalapack_lp64 -lmkl_blacs_openmpi_lp64 -mkl" --with-fft-flavor=fftw3 --with-fft-libs="-lfftw3xf_intel_pic -mkl" --with-dft-flavor="libxc+atompaw+wannier90" --enable-mpi-io --with-trio-flavor="netcdf" --enable-gw-dpc="no" --with-tardir=/tmp/tardir --prefix=$HOME/ABINIT --enable-openmp --enable-optim
+
+make -j 40
+make install
diff --git a/Abinit/download.sh b/Abinit/download.sh
index 84d88edf1db85e40e62b369944efd4dc8a64ed8a..c352cfe7f76ac4886116ef55408ed614188dfb4e 100644
--- a/Abinit/download.sh
+++ b/Abinit/download.sh
@@ -1,7 +1,15 @@
 #!/bin/sh
 
-wget https://www.abinit.org/sites/default/files/packages/abinit-8.10.3.tar.gz
+mkdir -p /tmp/tardir
+cd /tmp/tardir
+wget http://forge.abinit.org/fallbacks/atompaw-4.0.1.0.tar.gz
+wget http://forge.abinit.org/fallbacks/libxc-3.0.0.tar.gz
+wget http://forge.abinit.org/fallbacks/netcdf-4.1.1.tar.gz
+wget http://forge.abinit.org/fallbacks/wannier90-2.0.1.1.tar.gz
+cd ..
+mkdir tarballs
+wget https://www.abinit.org/sites/default/files/packages/abinit-8.10.3.tar.gz
 
 tar xf abinit-8.10.3.tar.gz
 cd abinit-8.10.3
 
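The reworked download.sh pre-fetches the Abinit fallback tarballs (atompaw, libxc, netcdf, wannier90) into /tmp/tardir, and compile.sh points configure at them with --with-tardir=/tmp/tardir, so the build does not need network access. A minimal driver sketch for chaining the two scripts (this wrapper is an illustration only, not part of the patch; compilers, MPI and MKL are assumed to be loaded beforehand):

```
#!/bin/bash
# Hypothetical wrapper, run from the Abinit/ benchmark directory.
set -e
./download.sh            # fills /tmp/tardir with the fallbacks, unpacks /tmp/abinit-8.10.3/
benchdir=$PWD
cd /tmp/abinit-8.10.3
"$benchdir"/compile.sh   # configure + make; fallbacks are found via --with-tardir=/tmp/tardir
```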
diff --git a/Smilei/machines/irene-amd/env b/Deepgalaxy/.gitkeep
similarity index 100%
rename from Smilei/machines/irene-amd/env
rename to Deepgalaxy/.gitkeep
diff --git a/Deepgalaxy/README.md b/Deepgalaxy/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7a032fdebebf98409a2a820c5363b4d5dd807d91
--- /dev/null
+++ b/Deepgalaxy/README.md
@@ -0,0 +1,61 @@
+Description:
+============
+
+Presentation
+------------
+
+Deepgalaxy is an AI application whose goal is to classify galaxy images into reference categories. It is open source, runs on CPU or GPU, and is designed for high performance on supercomputers.
+
+Technical information:
+----------------------
+
+* website :
+* Scientific domain : astrophysics
+* Language : Python
+* Parallelism : MPI
+* GPU acceleration : Yes
+* Scalability : high
+* Vectorization:
+
+
+Compilation and simulation:
+===========================
+
+Download:
+---------
+
+Sources are available at:
+
+To download this release, run:
+
+```
+./download.sh
+```
+
+Compile:
+--------
+
+No compilation is needed because the code is interpreted Python.
+
+Run and validate the simulation:
+--------------------------------
+
+For each test case, given in a separate folder (e.g. testcase_small), you can find three scripts:
+
+* prepare.sh: prepare the simulation (move data to the right location)
+* run.sh : run the application and print out the evaluated metric
+* validate.sh: validation of the simulation from a scientific point of view
+
+For running and validating the simulation, one should be able to do:
+```
+cd testcase_XXX
+./prepare.sh
+./run.sh
+./validate.sh
+```
+and get no error code returned.
+These steps can also be used in a batch file to run the simulation through a job scheduler.
+
+
+
+
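As the README notes, the three per-test-case scripts can be chained inside a batch file. A hypothetical Slurm skeleton (job geometry, GPU request and test-case name are placeholders, not taken from this patch):

```
#!/bin/bash
#SBATCH -J deepgalaxy            # placeholder job name
#SBATCH --nodes=1                # placeholder geometry
#SBATCH --gres=gpu:4             # only if a GPU partition is targeted
#SBATCH -t 01:00:00

set -e
cd testcase_small                # placeholder test-case folder
./prepare.sh
./run.sh
./validate.sh
```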
diff --git a/Deepgalaxy/machines/.gitkeep b/Deepgalaxy/machines/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Deepgalaxy/machines/jean-zay-gpu/.gitkeep b/Deepgalaxy/machines/jean-zay-gpu/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Gromacs/README.md b/Gromacs/README.md
index 311e219c166318a5ff0093df0d83750721f71740..26b7a9a2f4f7c414536dd5936a200e7463859ccd 100644
--- a/Gromacs/README.md
+++ b/Gromacs/README.md
@@ -38,6 +38,8 @@ For the test, we will use a specific release. To download this release, run:
 Compile:
 --------
 
+Gromacs needs cmake (works with 3.9.1) and Python (2.7) to be compiled, and depends on an FFTW library (works with the FFTW interface of MKL 18).
+
 Information (if needed) about how to compile the application.
 
 For instance:
@@ -48,27 +50,33 @@ source machines/occigen-bdw/env
 ./compile.sh
 ```
 
-`machines/occigen-bdw/env` contains the information for compilation (module load gcc openmpi lapack hdf5 ...)
+`machines/occigen-bdw/env` contains the information for compilation (module load intel intelmpi ...)
 
 Run and validate the simulation:
 --------------------------------
 
-For each test case, given in a separate folder (e.g. testcase_small), you can find three scripts:
+For each test case, given in a separate folder (e.g. testcase_ion_channel_small and testcase_BSG_large), you can find three scripts:
 
 * prepare.sh: prepare the simulation (move data to the right location, recompile some minor changes, ...)
 * run.sh : run the application and print out the evaluated metric in a file called results (first numeric column representing ns/day corresponds to the evaluated metric to take into account)
-* validate.sh: validation of the simulation (if the evaluated metric is print out, the bench is considered validated)
+* validate.sh: validation of the simulation (if the evaluated metric is printed out and the simulation is completed, 20 000 steps for testcase_ion_channel_small and 1 200 000 steps for testcase_BSG_large, the bench is considered validated)
 
 For running and validating the simulation, one should be able to do:
 ```
 cd testcase_XXX
-./prepare.sh
-./run.sh
-./validate.sh
+./prepare.sh compile_dir_name (ex : ./prepare.sh compile_02-07-20-09-45-52)
+./run.sh run_dir_name (ex : ./run.sh run_02-07-20-09-52-08_compile_02-07-20-09-45-52)
+./validate.sh run_dir_name (ex : ./validate.sh run_02-07-20-09-52-08_compile_02-07-20-09-45-52)
 ```
 And getting no error code returned.
 Those steps can also be used in a batch file for running the simulation using a job scheduler.
 
+Bench description, run and assessment:
+--------------------------------------
+
+The aim of the test case called testcase_ion_channel_small is to validate the compilation and to check that Gromacs can be run on the target machine. It is not part of the evaluation.
+
+The target machine will be evaluated with the test case called testcase_BSG_large. This test case must be run with 560 MPI tasks, and the performance of the target machine will be evaluated by the ns/day criterion.
 
 
 
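Concretely, the chain described in the README relies on two environment variables read by the test-case scripts added further below: GROMACS_MACHINE selects the machines/<name>/ batch file, and GROMACS_LAUNCH_SCRIPT_COMMAND is the scheduler submission command. A sketch of a full run of the large test case (directory names are examples; recovering the run directory with `ls` is an assumption, since prepare.sh does not print the name it generates):

```
#!/bin/bash
set -e
export GROMACS_MACHINE=occigen-bdw              # one of the machines/ sub-directories
export GROMACS_LAUNCH_SCRIPT_COMMAND=sbatch     # ccc_msub on Irene
cd testcase_BSG_large
./prepare.sh compile_02-07-20-09-45-52          # example compile directory name
run_dir=$(ls -1dt run_* | head -1)              # prepare.sh creates run_<date>_<compile_dir>
./run.sh "$run_dir"
# once the batch job has finished:
./validate.sh "$run_dir"
```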
diff --git a/Gromacs/machines/irene-amd/batch_large_BSG.slurm b/Gromacs/machines/irene-amd/batch_large_BSG.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..b6b6717a7e8c261b39ad00c6d70e6601d7287456
--- /dev/null
+++ b/Gromacs/machines/irene-amd/batch_large_BSG.slurm
@@ -0,0 +1,28 @@
+#!/bin/bash
+#MSUB -J benchBSG
+#MSUB -A tgcc0090
+#MSUB -q rome
+#MSUB -m scratch
+#MSUB -T 5400
+#MSUB -N 70
+#MSUB -n 560
+#MSUB -c 16
+#MSUB -o slurm.benchBSG.o.%I
+#MSUB -e slurm.benchBSG.e.%I
+
+module purge
+module load intel/18.0.1.163 mpi/intelmpi/2018.0.1.163 cmake/3.9.1
+
+export PATH=GROMACS_HOME/bin:$PATH
+export LD_LIBRARY_PATH=GROMACS_HOME/lib64:$LD_LIBRARY_PATH
+export PKG_CONFIG_PATH=GROMACS_HOME/lib64/pkgconfig:$PKG_CONFIG_PATH
+export GMXBIN=GROMACS_HOME/bin
+export GMXLDLIB=GROMACS_HOME/lib64
+export GMXDATA=GROMACS_HOME/share/gromacs
+
+export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
+export KMP_HW_SUBSET=1T
+export KMP_AFFINITY=compact,1,0,granularity=fine
+
+ccc_mprun gmx_mpi mdrun -g -pin off -notunepme -maxh 0.9 -ntomp $SLURM_CPUS_PER_TASK -nsteps 1200000 -noconfout -s systWT250nsGr2018woEne.tpr -deffnm benchBSG -resethway
+grep Performance slurm* > results
diff --git a/Gromacs/machines/irene-amd/batch_large_ion_channel.slurm b/Gromacs/machines/irene-amd/batch_large_ion_channel.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..8201eb7b8553ff6d19d60ff0ef58617b3ac942e8
--- /dev/null
+++ b/Gromacs/machines/irene-amd/batch_large_ion_channel.slurm
@@ -0,0 +1,28 @@
+#!/bin/bash
+#MSUB -J gr_ion_channel
+#MSUB -A tgcc0090
+#MSUB -q rome
+#MSUB -m scratch
+#MSUB -T 1800
+#MSUB -N 16
+#MSUB -n 128
+#MSUB -c 16
+#MSUB -o slurm.gr_ion_channel.o.%I
+#MSUB -e slurm.gr_ion_channel.e.%I
+
+module purge
+module load intel/18.0.1.163 mpi/intelmpi/2018.0.1.163 cmake/3.9.1
+
+export PATH=GROMACS_HOME/bin:$PATH
+export LD_LIBRARY_PATH=GROMACS_HOME/lib64:$LD_LIBRARY_PATH
+export PKG_CONFIG_PATH=GROMACS_HOME/lib64/pkgconfig:$PKG_CONFIG_PATH
+export GMXBIN=GROMACS_HOME/bin
+export GMXLDLIB=GROMACS_HOME/lib64
+export GMXDATA=GROMACS_HOME/share/gromacs
+
+export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
+export KMP_HW_SUBSET=1T
+export KMP_AFFINITY=compact,1,0,granularity=fine
+
+ccc_mprun gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 1000000 -noconfout -s ion_channel.tpr -deffnm large_ion_channel -resethway
+grep Performance slurm* > results
diff --git a/Gromacs/machines/irene-amd/batch_small_ion_channel.slurm b/Gromacs/machines/irene-amd/batch_small_ion_channel.slurm
index 3fa338582558d0f413b0d5fb89cb73e2741eb0c4..4e7a27a6724955a71ca6d965624bb3a0bdb50459 100644
--- a/Gromacs/machines/irene-amd/batch_small_ion_channel.slurm
+++ b/Gromacs/machines/irene-amd/batch_small_ion_channel.slurm
@@ -5,10 +5,10 @@
 #MSUB -m scratch
 #MSUB -T 1800
 #MSUB -N 2
-#MSUB -n 64
-#MSUB -c 4
-#MSUB -o slurm.gr_ion_channel.o
-#MSUB -e slurm.gr_ion_channel.e
+#MSUB -n 8
+#MSUB -c 32
+#MSUB -o slurm.gr_ion_channel.o.%I
+#MSUB -e slurm.gr_ion_channel.e.%I
 
 module purge
 module load intel/18.0.1.163 mpi/intelmpi/2018.0.1.163 cmake/3.9.1
@@ -24,5 +24,5 @@ export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
 export KMP_HW_SUBSET=1T
 export KMP_AFFINITY=compact,1,0,granularity=fine
 
-ccc_mprun gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 20000 -noconfout -s ion_channel.tpr -resethway
+ccc_mprun gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 20000 -noconfout -s ion_channel.tpr -deffnm small_ion_channel -resethway
 grep Performance slurm* > results
diff --git a/Gromacs/machines/jean-zay-cpu/batch_large_BSG.slurm b/Gromacs/machines/jean-zay-cpu/batch_large_BSG.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..6e0f59eb195f09bf3048a9a6235f9b1b3a9ef56f
--- /dev/null
+++ b/Gromacs/machines/jean-zay-cpu/batch_large_BSG.slurm
@@ -0,0 +1,25 @@
+#!/bin/bash
+#SBATCH -J benchBSG
+#SBATCH -t 01:30:00
+#SBATCH --hint=nomultithread
+#SBATCH -A qbg@cpu
+#SBATCH --nodes=280
+#SBATCH --ntasks=560
+#SBATCH --ntasks-per-node=2
+#SBATCH --cpus-per-task=20
+
+module purge
+module load intel-compilers/18.0.1 intel-mpi/18.0.1 intel-mkl/18.0.1
+
+export PATH=GROMACS_HOME/bin:$PATH
+export LD_LIBRARY_PATH=GROMACS_HOME/lib64:$LD_LIBRARY_PATH
+export PKG_CONFIG_PATH=GROMACS_HOME/lib64/pkgconfig:$PKG_CONFIG_PATH
+export GMXBIN=GROMACS_HOME/bin
+export GMXLDLIB=GROMACS_HOME/lib64
+export GMXDATA=GROMACS_HOME/share/gromacs
+
+export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
+export OMP_PLACES=cores
+
+srun gmx_mpi mdrun -g -pin off -notunepme -maxh 0.9 -ntomp $SLURM_CPUS_PER_TASK -nsteps 1200000 -noconfout -s systWT250nsGr2018woEne.tpr -deffnm benchBSG -resethway
+grep Performance slurm* > results
diff --git a/Gromacs/machines/jean-zay-cpu/batch_large_ion_channel.slurm b/Gromacs/machines/jean-zay-cpu/batch_large_ion_channel.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..b3f6b104197191a45c6b30246a0488f82b3c2f57
--- /dev/null
+++ b/Gromacs/machines/jean-zay-cpu/batch_large_ion_channel.slurm
@@ -0,0 +1,26 @@
+#!/bin/bash
+#SBATCH -J gr_ion_channel
+#SBATCH -t 00:30:00
+#SBATCH --hint=nomultithread
+#SBATCH -A qbg@cpu
+#SBATCH --nodes=64
+#SBATCH --ntasks=128
+#SBATCH --ntasks-per-node=2
+#SBATCH --cpus-per-task=20
+
+module purge
+module load intel-compilers/18.0.1 intel-mpi/18.0.1 intel-mkl/18.0.1
+
+export PATH=GROMACS_HOME/bin:$PATH
+export LD_LIBRARY_PATH=GROMACS_HOME/lib64:$LD_LIBRARY_PATH
+export PKG_CONFIG_PATH=GROMACS_HOME/lib64/pkgconfig:$PKG_CONFIG_PATH
+export GMXBIN=GROMACS_HOME/bin
+export GMXLDLIB=GROMACS_HOME/lib64
+export GMXDATA=GROMACS_HOME/share/gromacs
+
+export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
+export KMP_HW_SUBSET=1T
+export KMP_AFFINITY=compact,1,0,granularity=fine
+
+srun -n $SLURM_NTASKS gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 1000000 -noconfout -s ion_channel.tpr -deffnm large_ion_channel -resethway
+grep Performance slurm* > results
diff --git a/Gromacs/machines/jean-zay-cpu/batch_small_ion_channel.slurm b/Gromacs/machines/jean-zay-cpu/batch_small_ion_channel.slurm
index 1618479b64ed397a44babdba6bd48a0b57767e34..36cdafd49b273f76a459fdf8608d316b5b30a398 100644
--- a/Gromacs/machines/jean-zay-cpu/batch_small_ion_channel.slurm
+++ b/Gromacs/machines/jean-zay-cpu/batch_small_ion_channel.slurm
@@ -4,10 +4,10 @@
 #SBATCH --hint=nomultithread
 #SBATCH --qos=qos_cpu-dev
 #SBATCH -A qbg@cpu
-#SBATCH --nodes=2
-#SBATCH --ntasks=20
-#SBATCH --ntasks-per-node=10
-#SBATCH --cpus-per-task=4
+#SBATCH --nodes=4
+#SBATCH --ntasks=8
+#SBATCH --ntasks-per-node=2
+#SBATCH --cpus-per-task=20
 
 module purge
 module load intel-compilers/18.0.1 intel-mpi/18.0.1 intel-mkl/18.0.1
@@ -21,8 +21,7 @@ export GMXDATA=GROMACS_HOME/share/gromacs
 
 export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
 export KMP_HW_SUBSET=1T
-export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
 export KMP_AFFINITY=compact,1,0,granularity=fine
 
-srun -n $SLURM_NTASKS gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 20000 -noconfout -s ion_channel.tpr -resethway
+srun -n $SLURM_NTASKS gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 20000 -noconfout -s ion_channel.tpr -deffnm small_ion_channel -resethway
 grep Performance slurm* > results
diff --git a/Gromacs/machines/jean-zay-gpu/batch_large_BSG.slurm b/Gromacs/machines/jean-zay-gpu/batch_large_BSG.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..e578697a2815e45438b353cf8c92fa9b3eb33111
--- /dev/null
+++ b/Gromacs/machines/jean-zay-gpu/batch_large_BSG.slurm
@@ -0,0 +1,27 @@
+#!/bin/bash
+#SBATCH -J benchBSG
+#SBATCH -t 01:30:00
+#SBATCH --hint=nomultithread
+#SBATCH -A qbg@gpu
+#SBATCH --nodes=70
+#SBATCH --ntasks=280
+#SBATCH --ntasks-per-node=4
+#SBATCH --cpus-per-task=10
+#SBATCH --gres=gpu:4
+
+module purge
+module load intel-compilers/18.0.1 intel-mpi/18.0.1 intel-mkl/18.0.1 gcc/6.5.0 cuda/10.1.1
+
+export PATH=GROMACS_HOME/bin:$PATH
+export LD_LIBRARY_PATH=GROMACS_HOME/lib64:$LD_LIBRARY_PATH
+export PKG_CONFIG_PATH=GROMACS_HOME/lib64/pkgconfig:$PKG_CONFIG_PATH
+export GMXBIN=GROMACS_HOME/bin
+export GMXLDLIB=GROMACS_HOME/lib64
+export GMXDATA=GROMACS_HOME/share/gromacs
+
+export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
+export KMP_HW_SUBSET=1T
+export KMP_AFFINITY=compact,1,0,granularity=fine
+
+srun -n $SLURM_NTASKS gmx_mpi mdrun -g -pin off -notunepme -maxh 0.9 -ntomp $SLURM_CPUS_PER_TASK -nsteps 1200000 -noconfout -s systWT250nsGr2018woEne.tpr -deffnm benchBSG -resethway
+grep Performance slurm* > results
diff --git a/Gromacs/machines/jean-zay-gpu/batch_large_ion_channel.slurm b/Gromacs/machines/jean-zay-gpu/batch_large_ion_channel.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..f7d99887c1341da118cf910128672422a4450aba
--- /dev/null
+++ b/Gromacs/machines/jean-zay-gpu/batch_large_ion_channel.slurm
@@ -0,0 +1,27 @@
+#!/bin/bash
+#SBATCH -J gr_ion_channel
+#SBATCH -t 00:30:00
+#SBATCH --hint=nomultithread
+#SBATCH -A qbg@gpu
+#SBATCH --nodes=32
+#SBATCH --ntasks=128
+#SBATCH --ntasks-per-node=4
+#SBATCH --cpus-per-task=10
+#SBATCH --gres=gpu:4
+
+module purge
+module load intel-compilers/18.0.1 intel-mpi/18.0.1 intel-mkl/18.0.1 gcc/6.5.0 cuda/10.1.1
+
+export PATH=GROMACS_HOME/bin:$PATH
+export LD_LIBRARY_PATH=GROMACS_HOME/lib64:$LD_LIBRARY_PATH
+export PKG_CONFIG_PATH=GROMACS_HOME/lib64/pkgconfig:$PKG_CONFIG_PATH
+export GMXBIN=GROMACS_HOME/bin
+export GMXLDLIB=GROMACS_HOME/lib64
+export GMXDATA=GROMACS_HOME/share/gromacs
+
+export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
+export KMP_HW_SUBSET=1T
+export KMP_AFFINITY=compact,1,0,granularity=fine
+
+srun -n $SLURM_NTASKS gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 1000000 -noconfout -s ion_channel.tpr -deffnm large_ion_channel -resethway
+grep Performance slurm* > results
diff --git a/Gromacs/machines/jean-zay-gpu/batch_small_ion_channel.slurm b/Gromacs/machines/jean-zay-gpu/batch_small_ion_channel.slurm
index e5e73dab04557d23694f082e942d725d42bd7061..2cf04517e4c6f510216787a79df7b78bd56f2fce 100644
--- a/Gromacs/machines/jean-zay-gpu/batch_small_ion_channel.slurm
+++ b/Gromacs/machines/jean-zay-gpu/batch_small_ion_channel.slurm
@@ -2,13 +2,12 @@
 #SBATCH -J gr_ion_channel
 #SBATCH -t 00:30:00
 #SBATCH --hint=nomultithread
-#SBATCH --qos=qos_gpu-dev
 #SBATCH -A qbg@gpu
-#SBATCH --nodes=2
+#SBATCH --nodes=8
 #SBATCH --ntasks=8
-#SBATCH --ntasks-per-node=4
-#SBATCH --cpus-per-task=10
-#SBATCH --gres=gpu:4
+#SBATCH --ntasks-per-node=1
+#SBATCH --cpus-per-task=40
+#SBATCH --gres=gpu:1
 
 module purge
 module load intel-compilers/18.0.1 intel-mpi/18.0.1 intel-mkl/18.0.1 gcc/6.5.0 cuda/10.1.1
@@ -22,8 +21,7 @@ export GMXDATA=GROMACS_HOME/share/gromacs
 
 export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
 export KMP_HW_SUBSET=1T
-export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
 export KMP_AFFINITY=compact,1,0,granularity=fine
 
-srun -n $SLURM_NTASKS gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 20000 -noconfout -s ion_channel.tpr -resethway
+srun -n $SLURM_NTASKS gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 20000 -noconfout -s ion_channel.tpr -deffnm small_ion_channel -resethway
 grep Performance slurm* > results
diff --git a/Gromacs/machines/occigen-bdw/batch_large_BSG.slurm b/Gromacs/machines/occigen-bdw/batch_large_BSG.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..7745b70e0fa91b32d3b5a836b705e061fe9315a4
--- /dev/null
+++ b/Gromacs/machines/occigen-bdw/batch_large_BSG.slurm
@@ -0,0 +1,26 @@
+#!/bin/bash
+#SBATCH -J benchBSG
+#SBATCH -C BDW28
+#SBATCH -t 01:30:00
+#SBATCH --nodes=280
+#SBATCH --ntasks=560
+#SBATCH --ntasks-per-node=2
+#SBATCH --cpus-per-task=14
+#SBATCH --threads-per-core=1
+
+module purge
+module load intel/18.1 intelmpi/2018.1.163
+
+export PATH=GROMACS_HOME/bin:$PATH
+export LD_LIBRARY_PATH=GROMACS_HOME/lib64:$LD_LIBRARY_PATH
+export PKG_CONFIG_PATH=GROMACS_HOME/lib64/pkgconfig:$PKG_CONFIG_PATH
+export GMXBIN=GROMACS_HOME/bin
+export GMXLDLIB=GROMACS_HOME/lib64
+export GMXDATA=GROMACS_HOME/share/gromacs
+
+export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
+export KMP_HW_SUBSET=1T
+export KMP_AFFINITY=compact,1,0,granularity=fine
+
+srun -n $SLURM_NTASKS gmx_mpi mdrun -g -pin off -notunepme -maxh 0.9 -ntomp $SLURM_CPUS_PER_TASK -nsteps 1200000 -noconfout -s systWT250nsGr2018woEne.tpr -deffnm benchBSG -resethway
+grep Performance slurm* > results
diff --git a/Gromacs/machines/occigen-bdw/batch_large_ion_channel.slurm b/Gromacs/machines/occigen-bdw/batch_large_ion_channel.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..00cfc09aeea1ced760faa65db1c9ba5e16f02900
--- /dev/null
+++ b/Gromacs/machines/occigen-bdw/batch_large_ion_channel.slurm
@@ -0,0 +1,26 @@
+#!/bin/bash
+#SBATCH -J ion_channel
+#SBATCH -C BDW28
+#SBATCH -t 00:30:00
+#SBATCH --nodes=64
+#SBATCH --ntasks=128
+#SBATCH --ntasks-per-node=2
+#SBATCH --cpus-per-task=14
+#SBATCH --threads-per-core=1
+
+module purge
+module load intel/18.1 intelmpi/2018.1.163
+
+export PATH=GROMACS_HOME/bin:$PATH
+export LD_LIBRARY_PATH=GROMACS_HOME/lib64:$LD_LIBRARY_PATH
+export PKG_CONFIG_PATH=GROMACS_HOME/lib64/pkgconfig:$PKG_CONFIG_PATH
+export GMXBIN=GROMACS_HOME/bin
+export GMXLDLIB=GROMACS_HOME/lib64
+export GMXDATA=GROMACS_HOME/share/gromacs
+
+export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
+export KMP_HW_SUBSET=1T
+export KMP_AFFINITY=compact,1,0,granularity=fine
+
+srun -n $SLURM_NTASKS gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 1000000 -noconfout -s ion_channel.tpr -deffnm large_ion_channel -resethway
+grep Performance slurm* > results
diff --git a/Gromacs/machines/occigen-bdw/batch_small_ion_channel.slurm b/Gromacs/machines/occigen-bdw/batch_small_ion_channel.slurm
index f89d393f0fd7d77d5f31d4853d57e37de2ef24d1..7c9a79316f577e5a3a3bda36ec73a0a0d501bee4 100644
--- a/Gromacs/machines/occigen-bdw/batch_small_ion_channel.slurm
+++ b/Gromacs/machines/occigen-bdw/batch_small_ion_channel.slurm
@@ -2,10 +2,10 @@
 #SBATCH -J ion_channel
 #SBATCH -C BDW28
 #SBATCH -t 00:30:00
-#SBATCH --nodes=2
+#SBATCH --nodes=8
 #SBATCH --ntasks=8
-#SBATCH --ntasks-per-node=4
-#SBATCH --cpus-per-task=7
+#SBATCH --ntasks-per-node=1
+#SBATCH --cpus-per-task=28
 #SBATCH --threads-per-core=1
 
 module purge
@@ -20,8 +20,7 @@ export GMXDATA=GROMACS_HOME/share/gromacs
 
 export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
 export KMP_HW_SUBSET=1T
-export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
 export KMP_AFFINITY=compact,1,0,granularity=fine
 
-srun -n $SLURM_NTASKS gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 20000 -noconfout -s ion_channel.tpr -resethway
+srun -n $SLURM_NTASKS gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 20000 -noconfout -s ion_channel.tpr -deffnm small_ion_channel -resethway
 grep Performance slurm* > results
diff --git a/Gromacs/testcase_BSG_large/prepare.sh b/Gromacs/testcase_BSG_large/prepare.sh
new file mode 100755
index 0000000000000000000000000000000000000000..95e561412a46bea9e51998ca1f39b73bbf04afb7
--- /dev/null
+++ b/Gromacs/testcase_BSG_large/prepare.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+compile_gromacs_dir_name=$1
+
+if [ -z "$compile_gromacs_dir_name" ]
+then
+echo "compile_gromacs_dir_name missing"
+echo "example : ./prepare.sh compile_01-13-20-16-20-05"
+exit 1
+fi
+
+run_date=`date +"%m-%d-%y-%H-%M-%S"`
+run_dir=run_${run_date}_${compile_gromacs_dir_name}
+mkdir $run_dir
+cd $run_dir
+cp ../systWT250nsGr2018woEne.tpr .
+cp ../../machines/${GROMACS_MACHINE}/batch_large_BSG.slurm batch_large_BSG.slurm.tmp.tmp
+awk '$1=$1' FS="GROMACS_HOME" OFS="../../${compile_gromacs_dir_name}/gromacs/2019.4" batch_large_BSG.slurm.tmp.tmp > batch_large_BSG.slurm.tmp
+awk '$1=$1' FS="GROMACS_HOME" OFS="../../${compile_gromacs_dir_name}/gromacs/2019.4" batch_large_BSG.slurm.tmp > batch_large_BSG.slurm
+rm -f batch_large_BSG.slurm.tmp.tmp batch_large_BSG.slurm.tmp
diff --git a/Gromacs/testcase_BSG_large/run.sh b/Gromacs/testcase_BSG_large/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b85b9a54daa1636e8cd962400b3eb57269b061c7
--- /dev/null
+++ b/Gromacs/testcase_BSG_large/run.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+run_gromacs_dir_name=$1
+
+if [ -z "$run_gromacs_dir_name" ]
+then
+echo "run_gromacs_dir_name missing"
+echo "example : ./run.sh run_01-13-20-17-10-24_compile_01-13-20-16-20-05"
+exit 1
+fi
+
+cd $run_gromacs_dir_name
+${GROMACS_LAUNCH_SCRIPT_COMMAND} batch_large_BSG.slurm
diff --git a/Gromacs/testcase_BSG_large/systWT250nsGr2018woEne.tpr b/Gromacs/testcase_BSG_large/systWT250nsGr2018woEne.tpr
new file mode 100644
index 0000000000000000000000000000000000000000..0173e0a0e1a5d42d3fd988a42023b5fc60910838
Binary files /dev/null and b/Gromacs/testcase_BSG_large/systWT250nsGr2018woEne.tpr differ
diff --git a/Gromacs/testcase_BSG_large/validate.sh b/Gromacs/testcase_BSG_large/validate.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b822ad73eeb5000272a5700a90cf66b4eb5bbbf2
--- /dev/null
+++ b/Gromacs/testcase_BSG_large/validate.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+run_gromacs_dir_name=$1
+
+if [ -z "$run_gromacs_dir_name" ]
+then
+echo "run_gromacs_dir_name missing"
+echo "example : ./validate.sh run_01-13-20-17-10-24_compile_01-13-20-16-20-05"
+exit 1
+fi
+
+cd $run_gromacs_dir_name
+perf=`grep Performance: results | awk '{ print $2 }'`
+end_sim=`grep "Statistics over 1200001 steps using 12001 frames" benchBSG.log`
+
+if [ -z "$perf" ] || [ -z "$end_sim" ]
+then
+  echo "bench is not validated"
+else
+  echo bench is validated
+  echo end = $end_sim
+  echo perf = $perf ns/day
+fi
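The awk calls in the prepare.sh scripts (above for testcase_BSG_large, below for testcase_ion_channel_large) split each line of the machine batch file on the GROMACS_HOME placeholder and re-join the fields with the path of the chosen build directory, i.e. they perform a global substitution. An equivalent single-pass form, shown only as a suggestion and not what the patch uses, would be:

```
# Same substitution with one sed call instead of two awk passes:
sed "s|GROMACS_HOME|../../${compile_gromacs_dir_name}/gromacs/2019.4|g" \
    batch_large_BSG.slurm.tmp.tmp > batch_large_BSG.slurm
```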
diff --git a/Gromacs/testcase_ion_channel_large/ion_channel.tpr b/Gromacs/testcase_ion_channel_large/ion_channel.tpr
new file mode 100644
index 0000000000000000000000000000000000000000..abcd87e04f2bfbe4e2db67ff0d2e517892b9169c
Binary files /dev/null and b/Gromacs/testcase_ion_channel_large/ion_channel.tpr differ
diff --git a/Gromacs/testcase_ion_channel_large/prepare.sh b/Gromacs/testcase_ion_channel_large/prepare.sh
new file mode 100755
index 0000000000000000000000000000000000000000..27e3bd714093c6bc4a7831249f91bc00a9ca1c12
--- /dev/null
+++ b/Gromacs/testcase_ion_channel_large/prepare.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+compile_gromacs_dir_name=$1
+
+if [ -z "$compile_gromacs_dir_name" ]
+then
+echo "compile_gromacs_dir_name missing"
+echo "example : ./prepare.sh compile_01-13-20-16-20-05"
+exit 1
+fi
+
+run_date=`date +"%m-%d-%y-%H-%M-%S"`
+run_dir=run_${run_date}_${compile_gromacs_dir_name}
+mkdir $run_dir
+cd $run_dir
+cp ../ion_channel.tpr .
+cp ../../machines/${GROMACS_MACHINE}/batch_large_ion_channel.slurm batch_large_ion_channel.slurm.tmp.tmp
+awk '$1=$1' FS="GROMACS_HOME" OFS="../../${compile_gromacs_dir_name}/gromacs/2019.4" batch_large_ion_channel.slurm.tmp.tmp > batch_large_ion_channel.slurm.tmp
+awk '$1=$1' FS="GROMACS_HOME" OFS="../../${compile_gromacs_dir_name}/gromacs/2019.4" batch_large_ion_channel.slurm.tmp > batch_large_ion_channel.slurm
+rm -f batch_large_ion_channel.slurm.tmp.tmp batch_large_ion_channel.slurm.tmp
diff --git a/Gromacs/testcase_ion_channel_large/run.sh b/Gromacs/testcase_ion_channel_large/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..69292702b622a5c38445abe28e4b0b86fdd169a8
--- /dev/null
+++ b/Gromacs/testcase_ion_channel_large/run.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+run_gromacs_dir_name=$1
+
+if [ -z "$run_gromacs_dir_name" ]
+then
+echo "run_gromacs_dir_name missing"
+echo "example : ./run.sh run_01-13-20-17-10-24_compile_01-13-20-16-20-05"
+exit 1
+fi
+
+cd $run_gromacs_dir_name
+${GROMACS_LAUNCH_SCRIPT_COMMAND} batch_large_ion_channel.slurm
diff --git a/Gromacs/testcase_ion_channel_large/validate.sh b/Gromacs/testcase_ion_channel_large/validate.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1717cb965c314bd193bb7d432b65e5e66c13bb69
--- /dev/null
+++ b/Gromacs/testcase_ion_channel_large/validate.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+run_gromacs_dir_name=$1
+
+if [ -z "$run_gromacs_dir_name" ]
+then
+echo "run_gromacs_dir_name missing"
+echo "example : ./validate.sh run_01-13-20-17-10-24_compile_01-13-20-16-20-05"
+exit 1
+fi
+
+cd $run_gromacs_dir_name
+perf=`grep Performance results | awk '{ print $2 }'`
+end_sim=`grep "Statistics over 1000001 steps using 100001 frames" large_ion_channel.log`
+
+if [ -z "$perf" ] || [ -z "$end_sim" ]
+then
+  echo "bench is not validated"
+else
+  echo bench is validated
+  echo end = $end_sim
+  echo perf = $perf ns/day
+fi
diff --git a/Gromacs/testcase_ion_channel_small/batch_small_ion_channel_occgen.aps.slurm b/Gromacs/testcase_ion_channel_small/batch_small_ion_channel_occgen.aps.slurm
index cb6a4034e9f258b20200f5ba31db7f113d65da32..21f5ab13109bd43f1d092c7055c174bfae3c4403 100644
--- a/Gromacs/testcase_ion_channel_small/batch_small_ion_channel_occgen.aps.slurm
+++ b/Gromacs/testcase_ion_channel_small/batch_small_ion_channel_occgen.aps.slurm
@@ -24,4 +24,4 @@ export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
 export KMP_HW_SUBSET=1T
 export KMP_AFFINITY=compact,1,0,granularity=fine
 
-srun -n $SLURM_NTASKS aps gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 20000 -noconfout -s ion_channel.tpr -resethway
+srun -n $SLURM_NTASKS aps gmx_mpi mdrun -pin off -notunepme -maxh 0.5 -ntomp $SLURM_CPUS_PER_TASK -nsteps 20000 -noconfout -s ion_channel.tpr -deffnm small_ion_channel -resethway
diff --git a/Gromacs/testcase_ion_channel_small/validate.sh b/Gromacs/testcase_ion_channel_small/validate.sh
index 89e5a8aa98193fac5f588cd43a732c20e840b91a..b6cb451214423011bcf389e91a44d44c9034d9eb 100755
--- a/Gromacs/testcase_ion_channel_small/validate.sh
+++ b/Gromacs/testcase_ion_channel_small/validate.sh
@@ -10,11 +10,13 @@ fi
 
 cd $run_gromacs_dir_name
 perf=`grep Performance results | awk '{ print $2 }'`
+end_sim=`grep "Statistics over 20001 steps using 2001 frames" small_ion_channel.log`
 
-if [ -z "$perf" ]
+if [ -z "$perf" ] || [ -z "$end_sim" ]
 then
   echo "bench is not validated"
 else
   echo bench is validated
+  echo end = $end_sim
   echo perf = $perf ns/day
 fi
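Note that the validate.sh scripts above only print whether the bench is validated; as written they always exit 0. If the "no error code returned" criterion from the README is meant to be enforced by the scripts themselves, the final test could be written as follows (a suggestion only, not part of the patch):

```
if [ -z "$perf" ] || [ -z "$end_sim" ]
then
  echo "bench is not validated"
  exit 1    # propagate the failure to the caller
else
  echo "bench is validated"
  echo "perf = $perf ns/day"
fi
```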
diff --git a/Smilei/machines/irene-amd/batch_large.slurm b/Smilei/machines/irene-amd/batch_large.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..2698b7c45707d497ba8c0c5032c907ff1aaa8998
--- /dev/null
+++ b/Smilei/machines/irene-amd/batch_large.slurm
@@ -0,0 +1,25 @@
+#!/bin/bash
+#MSUB -r tst_large       # job name
+#MSUB -N 200             # number of nodes to use
+#MSUB -n 800             # number of tasks
+#MSUB -c 16              # number of threads per task to use
+#MSUB -T 7200            # max walltime in seconds. default=7200
+#MSUB -m scratch         # file system
+#MSUB -e error_%I.e      # error output
+#MSUB -o output_%I.o
+#MSUB -A tgcc0090        # project
+#MSUB -q rome            # partition of machine
+
+## Set the working dir to the dir in which the submission was called.
+
+source env_amd
+
+export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
+export KMP_AFFINITY=granularity=fine,compact,1,0,verbose
+
+mkdir -p large_output
+cd large_output
+
+#ccc_mprun --mpi=pmi2 -K1 -m block:block -c $SLURM_CPUS_PER_TASK --resv-ports -n $SLURM_NTASKS $EXE ../test.py
+ccc_mprun $EXE ../test.py
+
diff --git a/Smilei/machines/irene-amd/batch_small.slurm b/Smilei/machines/irene-amd/batch_small.slurm
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..06d6b09379c68d596c7d9a9129955b6ea6459d25 100644
--- a/Smilei/machines/irene-amd/batch_small.slurm
+++ b/Smilei/machines/irene-amd/batch_small.slurm
@@ -0,0 +1,26 @@
+#!/bin/bash
+#MSUB -r tst_small       # job name
+#MSUB -N 50              # number of nodes to use
+#MSUB -n 200             # number of tasks
+#MSUB -c 16              # number of threads per task to use
+#MSUB -T 10800           # max walltime in seconds. default=7200
+#MSUB -m scratch         # file system
+#MSUB -e error_%I.e      # error output
+#MSUB -o output_%I.o
+#MSUB -A tgcc0090        # project
+#MSUB -q rome            # partition of machine
+
+## Set the working dir to the dir in which the submission was called.
+
+source env_amd
+
+export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
+export KMP_AFFINITY=granularity=fine,compact,1,0,verbose
+
+mkdir -p small_output
+cd small_output
+
+#ccc_mprun --mpi=pmi2 -K1 -m block:block -c $SLURM_CPUS_PER_TASK --resv-ports -n $SLURM_NTASKS $EXE ../test.py
+ccc_mprun $EXE ../test.py
+
+
diff --git a/Smilei/machines/irene-amd/env_bench b/Smilei/machines/irene-amd/env_bench
new file mode 100644
index 0000000000000000000000000000000000000000..b73ff2e65c934f4cc5c855a545b714f6e6ad53cb
--- /dev/null
+++ b/Smilei/machines/irene-amd/env_bench
@@ -0,0 +1,19 @@
+# machine file for Irene Joliot-Curie using AMD-ROME
+# __________________________________________________________
+#
+# Load the correct modules:
+#
+module purge
+module load intel/19.0.5.281
+module load mpi/intelmpi/2019.0.5.281
+module load flavor/hdf5/parallel
+module load hdf5/1.8.20
+module load python3/3.7.2
+
+export CXXFLAGS+="-xHOST -axAVX -O3 -ip -inline-factor=1000 -qopt-zmm-usage=high "
+
+GIT_ROOT=`git rev-parse --show-toplevel`
+SMILEI_DIR=$GIT_ROOT/Smilei/
+EXE=$SMILEI_DIR/Smilei/smilei
+
+
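The two Smilei batch files above use TGCC-style #MSUB directives and ccc_mprun, so on Irene they are typically submitted with ccc_msub; the env file they source (env_amd) is resolved relative to the submission directory. A minimal submission sketch (an illustration, assuming the commands are issued from Smilei/machines/irene-amd and that the env file is present there):

```
# Submit the small case first to validate the setup, then the large one:
ccc_msub batch_small.slurm
ccc_msub batch_large.slurm
```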