MPI Program Examples



/* MPI Lab 1, Example Program   */



#include <stdio.h>
#include "mpi.h"

int main(int argc, char **argv)
{
   int rank, size;
   MPI_Init(&argc,&argv);
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   MPI_Comm_size(MPI_COMM_WORLD, &size);
   printf("Hello world! I am %d of %d\n",rank,size);
   MPI_Finalize();
   return 0;
}
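
The same skeleton extends naturally to report where each rank is actually
running. The variant below is a sketch added here (it is not part of the
original lab); it uses the standard MPI_Get_processor_name call, and the
output wording is our own.

/* Sketch: Lab 1 plus MPI_Get_processor_name */

#include <stdio.h>
#include "mpi.h"

int main(int argc, char **argv)
{
   char name[MPI_MAX_PROCESSOR_NAME];
   int rank, size, len;
   MPI_Init(&argc, &argv);
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   MPI_Comm_size(MPI_COMM_WORLD, &size);
   MPI_Get_processor_name(name, &len);   /* host executing this rank */
   printf("Hello world! I am %d of %d on %s\n", rank, size, name);
   MPI_Finalize();
   return 0;
}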

/* MPI Lab 2a, Example Program   */


#include <stdio.h>
#include "mpi.h"

int main(int argc, char **argv)
{
   int rank, size, n, to, from, tagno;
   MPI_Status status;
   n = -1;
   MPI_Init(&argc,&argv);
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   MPI_Comm_size(MPI_COMM_WORLD, &size);
   /* Pass the token to the right-hand neighbour; the ring wraps around */
   to = rank + 1;
   if (rank == size - 1) to = 0;
   from = rank - 1;
   tagno = 201;
   printf("Process %d of %d is alive\n",rank,size);
   if (rank == 0){
      from = size - 1;
      printf("Please enter a positive integer\n");
      scanf("%d",&n);
      printf("n = %d\n",n);
      MPI_Send(&n,1,MPI_INT,to,tagno,MPI_COMM_WORLD);
   }
   while (1){
      /* Accept the token from any sender; status records the real source */
      from = MPI_ANY_SOURCE;
      MPI_Recv(&n,1,MPI_INT,from,tagno,MPI_COMM_WORLD, &status);
      printf("Rank %d received %d\n",rank, n);
      /* Rank 0 decrements the token (and advances the tag) before
         forwarding it; the other ranks forward first and update their
         local copies afterwards, so every rank applies the same test */
      if (rank == 0) {n--; tagno++;}
      MPI_Send(&n,1,MPI_INT,to,tagno,MPI_COMM_WORLD);
      if (rank != 0) {n--; tagno++;}
      if (n < 0){
         /* Note: the last token forwarded to rank 0 is never received;
            a more careful version would drain it before finalizing */
         MPI_Finalize();
         return 0;
      }
   }
}
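
Because the receive in Lab 2a uses MPI_ANY_SOURCE, the MPI_Status object is
the only record of who actually sent the token. The following self-contained
sketch (our illustration, not lab code) shows how the status fields and
MPI_Get_count are read; it needs at least two processes.

/* Sketch: inspecting MPI_Status after a wildcard receive */

#include <stdio.h>
#include "mpi.h"

int main(int argc, char **argv)
{
   int rank, n = 7, count;
   MPI_Status status;
   MPI_Init(&argc, &argv);
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   if (rank == 0) {
      MPI_Recv(&n, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG,
               MPI_COMM_WORLD, &status);
      MPI_Get_count(&status, MPI_INT, &count);   /* elements received */
      printf("got %d int(s) from rank %d with tag %d\n",
             count, status.MPI_SOURCE, status.MPI_TAG);
   } else if (rank == 1) {
      MPI_Send(&n, 1, MPI_INT, 0, 99, MPI_COMM_WORLD);
   }
   MPI_Finalize();
   return 0;
}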



/* MPI Lab 2b, Example Program   */


/*
 * MPI Developers Conference 96
 * http://www.cse.nd.edu/mpidc95/
 * University of Notre Dame
 *
 * MPI Tutorial
 * Lab 2b
 *
 * Mail questions regarding tutorial material to mpidc@lsc.cse.nd.edu
 */

#include <stdio.h>
#include "mpi.h"


int main(int argc, char **argv)
{
  MPI_Status status;
  MPI_Request request[2];
  int i, num, msg1, msg2[8192];
  int rank, size, next, prev, index;
  const int tag1 = 201, tag2 = 202;

  /* Start up MPI */

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  next = (rank + 1) % size;
  prev = (rank + size - 1) % size;
 
  if (rank == 0) {
    printf("Enter the number of times around the ring: ");
    scanf("%d", &num);

    msg1 = num;
    for (i = 0; i < 8192; i++)
      msg2[i] = num;

    MPI_Send(&msg1, 1, MPI_INT, next, tag1, MPI_COMM_WORLD); 
    MPI_Send(msg2, 8192, MPI_INT, prev, tag2, MPI_COMM_WORLD); 
  } 

  MPI_Irecv(&msg1, 1, MPI_INT, prev, tag1, MPI_COMM_WORLD, &request[0]);
  MPI_Irecv(msg2, 8192, MPI_INT, next, tag2, MPI_COMM_WORLD, &request[1]);

  while (1) {
    MPI_Waitany(2, request, &index, &status);
    printf("%d: %d %d\n", rank, msg1, msg2[0]);
    
    if (rank == 0 && status.MPI_TAG == tag1) {
      msg1--;
      printf("Process 0 decremented msg1\n", msg1);
    } else if (rank == 0 && status.MPI_TAG == tag2) {
      for (i = 0; i < 8192; i++)
	msg2[i]--;
      printf("Process 0 decremented msg2 %d\n", msg2[0]);
    }
    
    if (status.MPI_TAG == tag1) {
      printf("MESSAGE ONE received:  %d --> %d\n", status.MPI_SOURCE, rank);
      MPI_Irecv(&msg1, 1, MPI_INT, prev, tag1, MPI_COMM_WORLD, &request[0]);
      if (msg1 >=0)
	MPI_Send(&msg1, 1, MPI_INT, next, tag1, MPI_COMM_WORLD);
    } else {
      printf("MESSAGE TWO received:  %d --> %d\n", status.MPI_SOURCE, rank);
      MPI_Irecv(msg2, 8192, MPI_INT, next, tag2, MPI_COMM_WORLD, &request[1]); 
      if (msg2[0] >=0)
	MPI_Send(msg2, 8192, MPI_INT, prev, tag2, MPI_COMM_WORLD); 
    }
    
    if (rank == 0) {
      if (msg1 == -1 && msg2[0] == -1) {
	printf("Process %d exiting\n", rank);
	break;
      }
    } else {
      if (msg1 == 0 && msg2[0] == 0) {
	printf("Process %d exiting\n", rank);
	break;
      }
    }
  }

  /* Quit.  (The re-posted nonblocking receives are still pending at this
     point; a production code would MPI_Cancel them before finalizing.) */

  MPI_Finalize();

  return 0;
}
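
Lab 2b is built on the basic post-work-wait pattern for nonblocking
communication. A stripped-down sketch of just that pattern (our addition,
not lab code; run with at least two processes):

/* Sketch: post an MPI_Irecv, overlap work, then MPI_Wait */

#include <stdio.h>
#include "mpi.h"

int main(int argc, char **argv)
{
  int rank, msg = 0;
  MPI_Request req;
  MPI_Status status;
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank == 1) {
    MPI_Irecv(&msg, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &req);
    /* ... useful computation could overlap the transfer here ... */
    MPI_Wait(&req, &status);       /* block until msg is valid */
    printf("rank 1 received %d\n", msg);
  } else if (rank == 0) {
    msg = 42;
    MPI_Send(&msg, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
  }
  MPI_Finalize();
  return 0;
}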


/* MPI Lab 3, Example Program   */

/*
 * MPI Developers Conference 96
 * http://www.cse.nd.edu/mpidc95/
 * University of Notre Dame
 *
 * MPI Tutorial
 * Lab 3
 *
 * Mail questions regarding tutorial material to mpidc@lsc.cse.nd.edu
 */


#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>


extern int loadtiff_(char *fileName, unsigned char *image, int *iw, int *ih);
extern int dumptiff_(char *fileName, unsigned char *image, int *w, int *h);

int
main(int argc, char *argv[])
{
  int width = 256, height = 256;
  int rank, comm_size, sum, my_sum;
  unsigned char pixels[65536];
  unsigned char recvbuf[65536];
  int numpixels, my_count;
  int i, val;
  double rms;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
  

  if (rank == 0) {

    /* Load the Image */
    loadtiff_("irish.tif", pixels, &width, &height);
    numpixels = width * height;
 

    /* Calculate the number of pixels in each sub image */
    my_count = numpixels / comm_size;
  }

  /* Broadcasts my_count to all the processes */

  MPI_Bcast( &my_count, 1, MPI_INT, 0, MPI_COMM_WORLD );


  /* Scatter the image into the recvbuf buffer*/

  MPI_Scatter( pixels, my_count, MPI_CHAR, recvbuf, my_count, MPI_CHAR,
                 0, MPI_COMM_WORLD );

  /* Take the sum of the squares of the partial image */
  
   for (i = 0, my_sum = 0; i < my_count; i++)
        my_sum += recvbuf[i]*recvbuf[i];

 
  /* Find the global sum of the squares = sum */
 
   MPI_Allreduce( &my_sum, &sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );

  /* rank 0 calculates the root mean square */
  if (rank == 0) {
    rms = sqrt ((double) sum / (double) numpixels);
    printf("RMS = %lf\n", rms);
  }

  /* Rank 0 broadcasts the RMS to the other nodes */
  
    MPI_Bcast( &rms, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD );

  /* Do the contrast operation */
  for (i=0; i< my_count; i++) {
    val = 2*recvbuf[i] - rms;
    if (val < 0 )
      recvbuf[i] = 0;
    else if (val > 255)
      recvbuf[i] = 255;
    else
      recvbuf[i] = val;
  }

  /* Gather back to root */


   MPI_Gather( recvbuf, my_count, MPI_CHAR, pixels, my_count, MPI_CHAR,
                0, MPI_COMM_WORLD );


  /* Dump the Image */
  if (rank == 0)
    dumptiff_("irish_new.tif", pixels, &width, &height);


  MPI_Finalize();
  return 0;
}
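
Note that the MPI_Scatter in Lab 3 quietly assumes that comm_size divides
the pixel count evenly; any leftover pixels are simply never scattered. A
hedged sketch of the general case with MPI_Scatterv follows (the stand-in
image and the buffer handling are our assumptions, not part of the lab):

/* Sketch: uneven distribution of pixels via MPI_Scatterv */

#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

int main(int argc, char **argv)
{
  int rank, size, i, numpixels = 65536, my_count;
  int *counts, *displs;
  unsigned char *recvbuf, *pixels = NULL;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  counts = malloc(size * sizeof(int));
  displs = malloc(size * sizeof(int));
  for (i = 0; i < size; i++) {
    /* the first numpixels % size ranks take one extra pixel */
    counts[i] = numpixels / size + (i < numpixels % size ? 1 : 0);
    displs[i] = (i == 0) ? 0 : displs[i - 1] + counts[i - 1];
  }
  my_count = counts[rank];
  recvbuf = malloc(my_count);
  if (rank == 0)
    pixels = calloc(numpixels, 1);   /* stand-in for the loaded image */

  MPI_Scatterv(pixels, counts, displs, MPI_UNSIGNED_CHAR,
               recvbuf, my_count, MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);
  printf("rank %d received %d pixels\n", rank, my_count);

  free(recvbuf); free(counts); free(displs);
  if (rank == 0) free(pixels);
  MPI_Finalize();
  return 0;
}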


/* MPI Lab 4, Example Program   */


/*
 * MPI Developers Conference 96
 * http://www.cse.nd.edu/mpidc95/
 * University of Notre Dame
 *
 * MPI Tutorial
 * Lab 4
 *
 * Mail questions regarding tutorial material to mpidc@lsc.cse.nd.edu
 */

#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"


/*
 * Local function
 */

void Build_Matrix(int x, int y, MPI_Comm comm_in, MPI_Comm *comm_matrix, 
		  MPI_Comm *comm_row, MPI_Comm *comm_col);


/*
 * Global variables to make example simpler
 */

int rank, size, row, col;


int main(int argc, char **argv)
{
  int x, y;
  int sum_row, sum_col;
  MPI_Comm comm_matrix, comm_row, comm_col;
  
  /* Start up MPI */

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
 
  /* Invoke for a hard-coded X and Y value. */

  x = 2;
  y = 2;

  /* Print some output on the "console" process */

  if (rank == 0)
    printf("For a %dx%d matrix (size %d):\n", x, y, size);

  /* Call the function to build the communicators */

  Build_Matrix(x, y, MPI_COMM_WORLD, &comm_matrix, &comm_row, &comm_col);
  
  /* Perform the reductions */
  
  MPI_Allreduce(&rank, &sum_row, 1, MPI_INT, MPI_SUM, comm_row);
  MPI_Allreduce(&rank, &sum_col, 1, MPI_INT, MPI_SUM, comm_col);
  printf("Process (%d, %d): Row sum = %d, Col sum = %d\n", row, col,
	 sum_row, sum_col);

  /* Quit */

  MPI_Finalize();
  return 0;
}


/* 
 * Build_Matrix(int, int, MPI_Comm, MPI_Comm *, MPI_Comm *, MPI_Comm *)
 * Build the matrix, return the specified communicators
 */
void Build_Matrix(int x, int y, MPI_Comm comm_in, MPI_Comm *comm_matrix, 
		  MPI_Comm *comm_row, MPI_Comm *comm_col)
{ 
  /* Real (i.e. non-example) programs should obviously have */
  /* better recovery methods */

  if (x * y != size) {
    if (rank == 0)
      printf("Sorry, program compiled for %dx%d matrix.  Please run with %d processors.\n", x, y, x*y);
    MPI_Finalize();
    exit(-1);
  }

  /* Setup the args for MPI_Comm_split() */

  row = rank / y;
  col = rank % y;

  /* Make the communicators.  First duplicate comm_in (which */
  /* in this case is MPI_COMM_WORLD) into comm_matrix.  Then */
  /* split comm_in again to create communicators for the row */
  /* and column by assigning the color of the splitting to be */
  /* the row and column numbers that we just calculated.  */

  MPI_Comm_dup(comm_in, comm_matrix);
  MPI_Comm_split(comm_in, row, rank, comm_row);
  MPI_Comm_split(comm_in, col, rank, comm_col);
}
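
Build_Matrix never releases the three communicators it creates. The
self-contained sketch below (ours, not the lab's) shows the same
MPI_Comm_split idea at its simplest, with the MPI_Comm_free cleanup a real
program should perform; it runs with any number of processes.

/* Sketch: split MPI_COMM_WORLD by parity, reduce, then free */

#include <stdio.h>
#include "mpi.h"

int main(int argc, char **argv)
{
  int rank, sum;
  MPI_Comm half;
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  /* color = rank % 2 puts even and odd ranks in separate communicators */
  MPI_Comm_split(MPI_COMM_WORLD, rank % 2, rank, &half);
  MPI_Allreduce(&rank, &sum, 1, MPI_INT, MPI_SUM, half);
  printf("rank %d: rank sum over my half = %d\n", rank, sum);
  MPI_Comm_free(&half);             /* release the communicator */
  MPI_Finalize();
  return 0;
}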





/* MPI Lab 5, Example Program   */


/* Program to illustrate the MPI topology functions */
#include <stdio.h>
#include "mpi.h"

typedef enum{FALSE, TRUE} BOOLEAN;
#define N_DIMS 2

int main(int argc, char **argv)
{
    MPI_Comm comm_2d, row_comm, col_comm;
    int myrank, size, P, Q, p, q, reorder;
    int dims[N_DIMS],         /* number of dimensions */
        local[N_DIMS],        /* local row and column positions */
        period[N_DIMS],       /* periodicity flags (TRUE = wrap-around) */
        remain_dims[N_DIMS];  /* sub-dimension computation flags */
    int left, right, bottom, top;
    int sum_row, sum_col;

    MPI_Init (&argc, &argv);

    MPI_Comm_rank (MPI_COMM_WORLD, &myrank);
    MPI_Comm_size (MPI_COMM_WORLD, &size);

    /* Generate a new communicator with virtual topology */
    dims[0] = dims[1] = 0;
    MPI_Dims_create( size, N_DIMS, dims );
    P = dims[0];
    Q = dims[1];

    reorder = TRUE;
    period[0] = period[1] = TRUE;
    MPI_Cart_create(MPI_COMM_WORLD, N_DIMS, dims, period, reorder, &comm_2d);

    /* Determine the position in the grid; comm_2d is carved into row
       and column communicators below */
    MPI_Cart_coords(comm_2d, myrank, N_DIMS, local);
    p = local[0]; 
    q = local[1];

    /* Get row and column communicators using cartesian sub-topology */
    remain_dims[0] = FALSE; 
    remain_dims[1] = TRUE;
    MPI_Cart_sub(comm_2d, remain_dims, &row_comm);

    remain_dims[0] = TRUE; 
    remain_dims[1] = FALSE;
    MPI_Cart_sub(comm_2d, remain_dims, &col_comm);

    MPI_Cart_shift(comm_2d, 0, 1, &left, &right);
    MPI_Cart_shift(comm_2d, 1, 1, &top, &bottom);

    printf("(%d,%d)[%d] left = %d right = %d top = %d bottom = %d\n",
           p, q, myrank, left, right, top, bottom);

    fflush(stdout);
    
    MPI_Allreduce(&myrank, &sum_row, 1, MPI_INT, MPI_SUM, row_comm);
    MPI_Allreduce(&myrank, &sum_col, 1, MPI_INT, MPI_SUM, col_comm);
    printf("Process (%d, %d): Row sum = %d, Col sum = %d\n", p, q,
	   sum_row, sum_col);
    
    MPI_Finalize();
    return 0;
}

/*

Output with 4 processors

(0,0)[0] left = 2 right = 2 top = 1 bottom = 1
(1,0)[2] left = 0 right = 0 top = 3 bottom = 3
(1,1)[3] left = 1 right = 1 top = 2 bottom = 2
(0,1)[1] left = 3 right = 3 top = 0 bottom = 0
Process (0, 0): Row sum = 1, Col sum = 2
Process (1, 0): Row sum = 5, Col sum = 2
Process (1, 1): Row sum = 5, Col sum = 4
Process (0, 1): Row sum = 1, Col sum = 4


*/
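
MPI_Cart_rank is the inverse of the MPI_Cart_coords call used above. The
sketch below (an addition, not lab code) round-trips a rank through its
grid coordinates; because reorder is enabled, ranks are taken from the grid
communicator itself rather than from MPI_COMM_WORLD.

/* Sketch: rank -> coordinates -> rank with MPI_Cart_rank */

#include <stdio.h>
#include "mpi.h"

int main(int argc, char **argv)
{
    int size, rank, r;
    int dims[2] = {0, 0}, period[2] = {1, 1}, coords[2];
    MPI_Comm grid;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Dims_create(size, 2, dims);
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, period, 1, &grid);
    MPI_Comm_rank(grid, &rank);                /* rank within the grid */
    MPI_Cart_coords(grid, rank, 2, coords);    /* rank -> (row, col)   */
    MPI_Cart_rank(grid, coords, &r);           /* (row, col) -> rank   */
    printf("grid rank %d sits at (%d,%d); round trip gives %d\n",
           rank, coords[0], coords[1], r);
    MPI_Comm_free(&grid);
    MPI_Finalize();
    return 0;
}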

/* MPI Lab 6, Example Program   */


/* Lab6:

   Create a datatype called submatrix that consists of elements in
   alternate rows and alternate columns of the given original matrix.

   Use MPI_SENDRECV to send the submatrix to itself and print the
   results. To test this program you can run the program on just
   one processor.

   For example, if the given matrix is:

   1  2  3  4  5  6
   7  8  9 10 11 12
   13 14 15 16 17 18
   19 20 21 22 23 24
   25 26 27 28 29 30

   The submatrix created should look like:

   1  3  5
   13 15 17
   25 27 29

*/

#include <stdio.h>
#include "mpi.h"
 
int main(int argc, char **argv)
{
    int myrank, size;
    int i, j, mym = 10, myn = 10;
    int a[10][10], c[5][5];
    MPI_Datatype subrow, submatrix;
    MPI_Status status;

    MPI_Init (&argc, &argv);
 
    MPI_Comm_rank (MPI_COMM_WORLD, &myrank);
    MPI_Comm_size (MPI_COMM_WORLD, &size);
 
    /* Initialize the local array */
    for (i = 0; i < mym; i++)
      for (j = 0; j < myn; j++)
	a[i][j] = i*myn + j + 1;

    /* Print the local matrix */
    for (i = 0; i < mym; i++){
      for (j = 0; j < myn; j++)
	printf("%d ", a[i][j]) ;
      printf("\n");
    }

    /* Create a submatrix datatype */
    
    /* Create datatype for the sub-row: 5 ints, taking every other column */

    MPI_Type_vector(5, 1, 2, MPI_INT, &subrow);

    /* Create datatype for the sub-matrix: 5 sub-rows, one per alternate
       row, i.e. a byte stride of two full 10-int rows.  (The original
       MPI_Type_hvector was removed in MPI-3; MPI_Type_create_hvector is
       the current name for the same call.) */

    MPI_Type_create_hvector(5, 1, 20*sizeof(int), subrow, &submatrix);

    /* Commit the datatype created */

    MPI_Type_commit(&submatrix);

    /* Send it to self and print it */

    MPI_Sendrecv(a,1,submatrix,myrank,201,c,25,MPI_INT,myrank,201,MPI_COMM_WORLD,&status);

    /* Print the submatrix */
    for (i = 0; i < 5; i++){
      for (j = 0; j < 5; j++)
	printf("%d ", c[i][j]) ;
      printf("\n");
    }

    MPI_Finalize();
    return 0;
}
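
Committed datatypes, like communicators, should eventually be released. The
one-dimensional sketch below (our addition, not lab code) applies the same
strided-vector idea to a single row, sent to self exactly as in Lab 6, with
the MPI_Type_free cleanup the lab omits; it runs on one process.

/* Sketch: every-other-element vector type, sendrecv to self, then free */

#include <stdio.h>
#include "mpi.h"

int main(int argc, char **argv)
{
    int i, rank, a[10], c[5];
    MPI_Datatype evens;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < 10; i++)
        a[i] = i + 1;
    MPI_Type_vector(5, 1, 2, MPI_INT, &evens);   /* every other int */
    MPI_Type_commit(&evens);
    MPI_Sendrecv(a, 1, evens, rank, 201, c, 5, MPI_INT, rank, 201,
                 MPI_COMM_WORLD, &status);
    for (i = 0; i < 5; i++)
        printf("%d ", c[i]);                     /* prints: 1 3 5 7 9 */
    printf("\n");
    MPI_Type_free(&evens);                       /* release the type */
    MPI_Finalize();
    return 0;
}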