support / hello_world · Commit 396e42fd
Authored Mar 08, 2018 by hautreux

Merge branch 'structure_modification' into dev

restructuring

Parents: d2dc2ab7, a62d2371
Changes: 15 files
test_scripts/frioul/hybrid.sh → test_scripts/frioul/hybrid/cache.sh
@@ -5,7 +5,7 @@
 #SBATCH --ntasks-per-node=4
 #SBATCH --cpus-per-task=17
 #SBATCH --time=0:40:00
-#SBATCH -C quad,cache|quad,flat
+#SBATCH -C quad,cache
 #SBATCH --exclusive
 #SBATCH --mem=50GB
 #SBATCH --output hybrid.output.slurm
@@ -26,5 +26,5 @@ ulimit -s unlimited
 rm -f *.out
-srun ../../bin/hello_hybrid
+srun ../../../bin/hello_hybrid
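A note on these two hunks: the script moved one directory deeper (test_scripts/frioul/hybrid.sh became test_scripts/frioul/hybrid/cache.sh), so the relative path to the binary gains one "../"; and where the old script accepted either memory mode (quad,cache|quad,flat), each mode now gets its own file, so this copy pins -C quad,cache. The quad,cache and quad,flat strings look like Xeon Phi (KNL) cluster-mode and MCDRAM-mode node features. A quick way to check which features the nodes actually advertise (the partition name "knl" below is only an illustrative assumption):

# List the feature tags each node advertises; -o "%20N %f" prints
# node names and their features. Replace "knl" with the real partition.
sinfo -p knl -o "%20N %f"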
test_scripts/frioul/hybrid/flat.sh (new file, mode 100644)
#!/bin/bash
#SBATCH -J hybrid
#SBATCH --nodes=7
#SBATCH --ntasks=28
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=17
#SBATCH --time=0:40:00
#SBATCH -C quad,flat
#SBATCH --exclusive
#SBATCH --mem=50GB
#SBATCH --output hybrid.output.slurm
set -e
export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
#Make sure that OMP_NUM_THREADS = cpus-per-task * KMP_HW_SUBSET
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=17
export KMP_AFFINITY=compact,1,0,granularity=fine
module load intel intelmpi
ulimit -s unlimited
rm -f *.out
srun ../../../bin/hello_hybrid
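The in-script comment ties OMP_NUM_THREADS to cpus-per-task times KMP_HW_SUBSET; with KMP_HW_SUBSET=1T (one hardware thread per core) and --cpus-per-task=17, the hard-coded 17 is right, but it has to be kept in sync by hand. A minimal sketch, assuming KMP_HW_SUBSET stays at 1T, that derives the value from Slurm instead:

# Slurm exports SLURM_CPUS_PER_TASK when --cpus-per-task is given;
# fall back to 17 (the value in this script) if it is unset.
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-17}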
test_scripts/frioul/mpi/cache.sh (new file, mode 100644)
#!/bin/bash
#SBATCH -J mpi
#SBATCH --nodes=7
#SBATCH --ntasks=476
#SBATCH --ntasks-per-node=68
#SBATCH --cpus-per-task=1
#SBATCH --time=0:40:00
#SBATCH -C quad,cache
#SBATCH --exclusive
#SBATCH --mem=50GB
#SBATCH --output mpi.output.slurm
set -e
export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
module load intel intelmpi
ulimit -s unlimited
srun ../../../bin/hello_mpi
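I_MPI_DOMAIN=auto and I_MPI_PIN_RESPECT_CPUSET=0 tell Intel MPI to build its own pinning domains instead of inheriting the cpuset Slurm hands it. If placement ever looks wrong, a debugging sketch (not part of the committed script) is to raise Intel MPI's debug level, which makes the runtime print the rank-to-core pinning map at startup:

# I_MPI_DEBUG at 4 or above reports each rank's pinning at launch.
export I_MPI_DEBUG=4
srun ../../../bin/hello_mpi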
test_scripts/frioul/mpi.sh → test_scripts/frioul/mpi/flat.sh
@@ -20,6 +20,6 @@ module load intel intelmpi
 ulimit -s unlimited
-srun ../../bin/hello_mpi
+srun ../../../bin/hello_mpi
test_scripts/frioul/mpi_mixed.sh → test_scripts/frioul/mpi/flat_and_cache.sh
@@ -9,13 +9,6 @@
 #EXPLICIT AND
 #SBATCH -C [quad,flat*4&quad,cache*3]
-#EXCLUSIVE OR
-#SBATCH --constraint=[flat|cache]
-#OR
-#SBATCH -C flat|cache
 #SBATCH --exclusive
 #SBATCH --mem=50GB
 #SBATCH --output mpi.output.slurm
@@ -30,6 +23,6 @@ module load intel intelmpi
 ulimit -s unlimited
-srun ../../bin/hello_mpi
+srun ../../../bin/hello_mpi
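The constraint kept in flat_and_cache.sh, -C [quad,flat*4&quad,cache*3], requests a heterogeneous allocation: four nodes carrying the quad,flat features and three carrying quad,cache, which matches --nodes=7. Expressions like this are easy to mistype, so a dry run can be worth doing before queuing (a sketch; --test-only validates the script and prints an expected start time without submitting):

# Validate the batch script, including its constraint expression,
# without actually submitting the job.
sbatch --test-only flat_and_cache.sh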
test_scripts/frioul/mpi/flat_or_cache.sh (new file, mode 100644)
#!/bin/bash
#SBATCH -J mpi
#SBATCH --nodes=7
#SBATCH --ntasks=476
#SBATCH --ntasks-per-node=68
#SBATCH --cpus-per-task=1
#SBATCH --time=0:40:00
#OR
#SBATCH -C flat|cache
#SBATCH --exclusive
#SBATCH --mem=50GB
#SBATCH --output mpi.output.slurm
set -e
export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
module load intel intelmpi
ulimit -s unlimited
srun ../../../bin/hello_mpi
test_scripts/frioul/mpi/flat_xor_cache.sh (new file, mode 100644)
#!/bin/bash
#SBATCH -J mpi
#SBATCH --nodes=7
#SBATCH --ntasks=476
#SBATCH --ntasks-per-node=68
#SBATCH --cpus-per-task=1
#SBATCH --time=0:40:00
#EXCLUSIVE OR
#SBATCH --constraint=[flat|cache]
#SBATCH --exclusive
#SBATCH --mem=50GB
#SBATCH --output mpi.output.slurm
set -e
export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
module load intel intelmpi
ulimit -s unlimited
srun ../../../bin/hello_mpi
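These two new files differ only in the constraint line, and the names spell out the semantics: in flat_or_cache.sh, -C flat|cache is a plain OR, so the seven nodes may be any mix of flat and cache nodes, while in flat_xor_cache.sh the square brackets of --constraint=[flat|cache] tell Slurm that every node must come from exactly one of the alternatives. To confirm which feature set a job actually received, it could print the features of its allocated nodes (an illustrative addition, not in the committed scripts):

# Report the feature tags on the nodes Slurm allocated to this job.
scontrol show node "$SLURM_JOB_NODELIST" | grep -i features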
test_scripts/frioul/omp.sh → test_scripts/frioul/omp/cache.sh
@@ -5,7 +5,7 @@
 #SBATCH --ntasks-per-node=1
 #SBATCH --cpus-per-task=68
 #SBATCH --time=0:40:00
-#SBATCH -C quad,cache|quad,flat
+#SBATCH -C quad,cache
 #SBATCH --exclusive
 #SBATCH --mem=50GB
 #SBATCH --output omp.output.slurm
@@ -23,6 +23,6 @@ ulimit -s unlimited
 rm -f *.out
-srun ../../bin/hello_omp
+srun ../../../bin/hello_omp
test_scripts/frioul/omp/flat.sh (new file, mode 100644)
#!/bin/bash
#SBATCH -J omp
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=68
#SBATCH --time=0:40:00
#SBATCH -C quad,flat
#SBATCH --exclusive
#SBATCH --mem=50GB
#SBATCH --output omp.output.slurm
set -e
#Make sure that OMP_NUM_THREADS = cpus-per-task * KMP_HW_SUBSET
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=68
export KMP_AFFINITY=compact,1,0,granularity=fine
module load intel
ulimit -s unlimited
rm -f *.out
srun ../../../bin/hello_omp
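KMP_AFFINITY=compact,1,0,granularity=fine requests compact placement (permute value 1, offset 0) and binds threads at the granularity of individual hardware threads. To double-check the resulting layout, a debugging variant (not part of the commit) adds the verbose modifier, which makes the Intel OpenMP runtime log each thread's binding at startup:

# "verbose" prints the OS-proc binding of every OpenMP thread.
export KMP_AFFINITY=verbose,compact,1,0,granularity=fine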
test_scripts/occigen/bdw28/bdw28_hybrid.sh → test_scripts/occigen/hybrid/bdw28.sh (file moved)
test_scripts/occigen/hsw24/hsw24_hybrid.sh → test_scripts/occigen/hybrid/hsw24.sh (file moved)
test_scripts/occigen/bdw28/bdw28_mpi.sh → test_scripts/occigen/mpi/bdw28.sh (file moved)
test_scripts/occigen/hsw24/hsw24_mpi.sh → test_scripts/occigen/mpi/hsw24.sh (file moved)
test_scripts/occigen/bdw28/bdw28_omp.sh → test_scripts/occigen/omp/bdw28.sh (file moved)
test_scripts/occigen/hsw24/hsw24_omp.sh → test_scripts/occigen/omp/hsw24.sh (file moved)