Commit 0c0bfdae authored by Gab's avatar Gab
Browse files

Merge branch 'delete_flat_mode_frioul' into 'dev'

Delete Frioul submission script with flat constraint

See merge request !3
parents b3b16609 0230bbb1
#!/bin/bash
# Hybrid MPI+OpenMP "hello world" submission script.
# Layout: 7 nodes x 4 MPI ranks/node x 17 OpenMP threads/rank
# (4 * 17 = 68 cores per node — presumably KNL hardware; TODO confirm).
#SBATCH -J hybrid
#SBATCH --nodes=7
#SBATCH --ntasks=28
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=17
#SBATCH --time=0:40:00
# Node feature constraint: quadrant cluster mode with MCDRAM in flat mode
# (standard KNL feature names on this machine).
#SBATCH -C quad,flat
#SBATCH --exclusive
#SBATCH --mem=50GB
#SBATCH --output hybrid.output.slurm
set -e
# Let Intel MPI compute its own pinning domains and ignore any
# externally-imposed cpuset when pinning ranks.
export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
#Make sure that OMP_NUM_THREADS = cpus-per-task * KMP_HW_SUBSET
export KMP_HW_SUBSET=1T        # one hardware thread per core
export OMP_NUM_THREADS=17      # 17 cores/task * 1 thread/core
export KMP_AFFINITY=compact,1,0,granularity=fine
module load intel intelmpi
ulimit -s unlimited            # OpenMP per-thread stacks can be large
rm -f *.out                    # clear output files from previous runs
srun ../../../bin/hello_hybrid
#!/bin/bash
# Pure-MPI "hello world" submission script:
# 7 nodes x 68 single-core ranks per node = 476 ranks.
#SBATCH -J mpi
#SBATCH --nodes=7
#SBATCH --ntasks=476
#SBATCH --ntasks-per-node=68
#SBATCH --cpus-per-task=1
#SBATCH --time=0:40:00
# OR of feature pairs: each node must match either quad+cache or quad+flat.
#SBATCH -C quad,cache|quad,flat
#SBATCH --exclusive
#SBATCH --mem=50GB
#SBATCH --output mpi.output.slurm
set -e
# Let Intel MPI compute its own pinning domains and ignore any
# externally-imposed cpuset when pinning ranks.
export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
module load intel intelmpi
ulimit -s unlimited
srun ../../../bin/hello_mpi
#!/bin/bash
# Pure-MPI "hello world" submission script:
# 7 nodes x 68 single-core ranks per node = 476 ranks.
#SBATCH -J mpi
#SBATCH --nodes=7
#SBATCH --ntasks=476
#SBATCH --ntasks-per-node=68
#SBATCH --cpus-per-task=1
#SBATCH --time=0:40:00
#EXPLICIT AND
# Bracketed AND with node counts: exactly 4 nodes with quad,flat
# AND 3 nodes with quad,cache (4 + 3 = --nodes=7).
#SBATCH -C [quad,flat*4&quad,cache*3]
#SBATCH --exclusive
#SBATCH --mem=50GB
#SBATCH --output mpi.output.slurm
set -e
# Let Intel MPI compute its own pinning domains and ignore any
# externally-imposed cpuset when pinning ranks.
export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
module load intel intelmpi
ulimit -s unlimited
srun ../../../bin/hello_mpi
#!/bin/bash
# Pure-MPI "hello world" submission script:
# 7 nodes x 68 single-core ranks per node = 476 ranks.
#SBATCH -J mpi
#SBATCH --nodes=7
#SBATCH --ntasks=476
#SBATCH --ntasks-per-node=68
#SBATCH --cpus-per-task=1
#SBATCH --time=0:40:00
#OR
# Plain OR: each node needs flat or cache; the allocation may mix both.
#SBATCH -C flat|cache
#SBATCH --exclusive
#SBATCH --mem=50GB
#SBATCH --output mpi.output.slurm
set -e
# Let Intel MPI compute its own pinning domains and ignore any
# externally-imposed cpuset when pinning ranks.
export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
module load intel intelmpi
ulimit -s unlimited
srun ../../../bin/hello_mpi
#!/bin/bash
# Pure-MPI "hello world" submission script:
# 7 nodes x 68 single-core ranks per node = 476 ranks.
#SBATCH -J mpi
#SBATCH --nodes=7
#SBATCH --ntasks=476
#SBATCH --ntasks-per-node=68
#SBATCH --cpus-per-task=1
#SBATCH --time=0:40:00
#EXCLUSIVE OR
# Bracketed OR: all allocated nodes must share the SAME feature —
# either all flat or all cache, never a mix (contrast with plain flat|cache).
#SBATCH --constraint=[flat|cache]
#SBATCH --exclusive
#SBATCH --mem=50GB
#SBATCH --output mpi.output.slurm
set -e
# Let Intel MPI compute its own pinning domains and ignore any
# externally-imposed cpuset when pinning ranks.
export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
module load intel intelmpi
ulimit -s unlimited
srun ../../../bin/hello_mpi
#!/bin/bash
# Pure-OpenMP "hello world" submission script:
# one task on one node using all 68 cores.
#SBATCH -J omp
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=68
#SBATCH --time=0:40:00
# Node feature constraint: quadrant cluster mode, MCDRAM in flat mode.
#SBATCH -C quad,flat
#SBATCH --exclusive
#SBATCH --mem=50GB
#SBATCH --output omp.output.slurm
set -e
#Make sure that OMP_NUM_THREADS = cpus-per-task * KMP_HW_SUBSET
export KMP_HW_SUBSET=1T        # one hardware thread per core
export OMP_NUM_THREADS=68      # 68 cores * 1 thread/core
export KMP_AFFINITY=compact,1,0,granularity=fine
module load intel               # no intelmpi: single-task OpenMP only
ulimit -s unlimited             # OpenMP per-thread stacks can be large
rm -f *.out                     # clear output files from previous runs
srun ../../../bin/hello_omp
#!/bin/bash
# Pure-OpenMP "hello world" submission script using 2 hardware threads
# per core (68 cores * 2 = 136 threads on one node).
#SBATCH -J omp
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --time=0:40:00
# Node may be in either MCDRAM mode; the allocation just needs one node.
#SBATCH -C flat|cache
#SBATCH --exclusive
#SBATCH --mem=50GB
#SBATCH --output omp.output.slurm
set -e
#Make sure that OMP_NUM_THREADS = cpus-per-task * KMP_HW_SUBSET
export KMP_HW_SUBSET=2T        # two hardware threads per core
export OMP_NUM_THREADS=136     # 68 cores * 2 threads/core
export KMP_AFFINITY=compact,1,0,granularity=fine
module load intel               # no intelmpi: single-task OpenMP only
ulimit -s unlimited             # OpenMP per-thread stacks can be large
rm -f *.out                     # clear output files from previous runs
# No --cpus-per-task above: ask srun for every CPU Slurm sees on the node.
srun --cpus-per-task=$SLURM_CPUS_ON_NODE ../../../bin/hello_omp
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment