Commit 71c896d0 authored by acloirec

modify program for output readability

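What follows is a small hybrid MPI + OpenMP placement test: each MPI rank reports the host and CPU core it runs on, then each of its OpenMP threads does the same, with the output serialized so the lines come out in rank order and, within a rank, in thread-id order.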
#define _GNU_SOURCE   /* for sched_getcpu() */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>   /* gethostname() */
#include <sched.h>    /* sched_getcpu() */
#include <mpi.h>

int main(int argc, char **argv)
{
    int mpiRank, mpiSize;
    char hostname[128];
    int nthreads, tid, cpuid;
    int i, j = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
    gethostname(hostname, sizeof hostname);

    /* Query the thread count outside a parallel region to avoid the data
       race of having every thread write the same shared variable. */
    nthreads = omp_get_max_threads();

    if (mpiRank == 0)
        printf("Run executed using %d MPI processes, with %d threads per process\n",
               mpiSize, nthreads);

    /* Serialize the output across ranks: all ranks meet at the barrier on
       each iteration, and only the rank whose turn it is prints. */
    for (i = 0; i < mpiSize; i++) {
        MPI_Barrier(MPI_COMM_WORLD);
        if (i == mpiRank) {
            printf("%s: MPI rank %d -> cpuid %d\n", hostname, mpiRank, sched_getcpu());
            #pragma omp parallel private(tid, nthreads, cpuid)
            {
                tid = omp_get_thread_num();
                nthreads = omp_get_num_threads();
                cpuid = sched_getcpu();
                /* Serialize the output across threads: spin on the shared
                   counter j until it reaches this thread's id, flushing so
                   updates made by other threads become visible. */
                while (j < tid) {
                    #pragma omp flush(j)
                }
                printf("\t thread %d -> cpuid %d on MPI rank %d on %s\n",
                       tid, cpuid, mpiRank, hostname);
                j++;   /* hand the turn to the next thread */
                #pragma omp flush(j)
            }
        }
    }

    MPI_Finalize();
    return 0;
}
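A typical build-and-run sequence looks like the following, assuming an MPI installation that provides the mpicc and mpirun wrappers, a compiler that accepts -fopenmp, and a hypothetical source file name hello_hybrid.c (the process and thread counts are illustrative):

mpicc -fopenmp -o hello_hybrid hello_hybrid.c
OMP_NUM_THREADS=4 mpirun -np 2 ./hello_hybrid

Note that sched_getcpu() is a glibc extension, which is why the code defines _GNU_SOURCE before the includes; on non-Linux systems the CPU-id lookup would need a platform-specific replacement.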