Collective communication routines, as opposed to point-to-point routines, are a group of MPI message-passing routines that perform one-(processor)-to-many-(processors) and many-to-one communications. MPI_Bcast is one such example, used to broadcast data from one processor to the other participating processors. These routines and their functionalities are shown diagrammatically in the table below. The four columns on the left denote the contents of the respective send buffers (e.g., arrays) of four processes. The content of each buffer, shown here as letters, is assigned a unique color to identify its origin; for instance, the letters in blue originated from process 1. The middle column lists the MPI routine that operates on the send buffers. The four columns on the right represent the contents of the processes' receive buffers resulting from the MPI operations.
| Process 0 Send Buffer | Process 1* Send Buffer | Process 2 Send Buffer | Process 3 Send Buffer | Function Used | Process 0 Receive Buffer | Process 1* Receive Buffer | Process 2 Receive Buffer | Process 3 Receive Buffer |
|---|---|---|---|---|---|---|---|---|
| a | b | c | d | MPI_Gather |   | a,b,c,d |   |   |
| a | b | c | d | MPI_Allgather | a,b,c,d | a,b,c,d | a,b,c,d | a,b,c,d |
|   | a,b,c,d |   |   | MPI_Scatter | a | b | c | d |
| a,b,c,d | e,f,g,h | i,j,k,l | m,n,o,p | MPI_Alltoall | a,e,i,m | b,f,j,n | c,g,k,o | d,h,l,p |
|   | b |   |   | MPI_Bcast | b | b | b | b |

\* Root process designation required by MPI_Gather, MPI_Scatter, and MPI_Bcast.
Example 2. Collective Communication Usages.
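
As a minimal concrete illustration of the last row of the table, the following C sketch (illustration only; it assumes the same four-process layout with process 1 as root, and uses MPI_CHAR for a single character) broadcasts one character from the root to every process:

#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, root = 1;              /* process 1 is the root, as in the table */
    char a = ' ';

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == root) a = 'b';       /* only the root holds the data initially */
    MPI_Bcast(&a, 1, MPI_CHAR, root, MPI_COMM_WORLD);
    printf("process %d now has '%c'\n", rank, a);   /* every process prints 'b' */

    MPI_Finalize();
    return 0;
}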
The table above is generated by the Fortran code shown below (a C version is also
available):
program collectives_example
implicit none
integer p, ierr, i, Iam, root
include "mpif.h" !! This brings in pre-defined MPI constants, ...
character*1 x(0:3), y(0:3), alphabets(0:15), a
data alphabets/'a','b','c','d','e','f','g','h','i','j','k','l',
& 'm','n','o','p'/
data root/1/ !! process 1 is the data sender/originator
c**Starts MPI processes ...
call MPI_Init(ierr) !! starts MPI
call MPI_Comm_rank(MPI_COMM_WORLD, Iam, ierr) !! get current process id
call MPI_Comm_size(MPI_COMM_WORLD, p, ierr) !! get number of processes
if (Iam .eq. 0) then
write(*,*)
write(*,*)'* This program demonstrates the use of collective',
& ' MPI functions'
write(*,*)'* Four processors are to be used for the demo'
write(*,*)'* Process 1 (of 0,1,2,3) is the designated root'
write(*,*)
write(*,*)
write(*,*)' Function Proc Sendbuf Recvbuf'
write(*,*)' -------- ---- ------- -------'
endif
c**Performs a gather operation
a = alphabets(Iam)
do i=0,p-1
y(i) = ' '
enddo
call MPI_Gather(a,1,MPI_CHARACTER, ! send buf,count,type
& y,1,MPI_CHARACTER, ! recv buf,count,type
& root, ! root (data origin)
& MPI_COMM_WORLD,ierr) ! comm,flag
write(*,"('MPI_Gather:',t20,i2,(3x,a1),t40,4(3x,a1))")Iam,a,y
call MPI_Barrier(MPI_COMM_WORLD,ierr)
c**Performs an all-gather operation
a = alphabets(Iam)
do i=0,p-1
y(i) = ' '
enddo
call MPI_Allgather(a,1,MPI_CHARACTER, ! send buf,count,type
& y,1,MPI_CHARACTER, ! recv buf,count,type
& MPI_COMM_WORLD,ierr) ! comm,flag
write(*,"('MPI_Allgather:',t20,i2,(3x,a1),t40,4(3x,a1))")Iam,a,y
call MPI_Barrier(MPI_COMM_WORLD,ierr)
c**Perform a scatter operation
do i=0,p-1
x(i) = alphabets(i+Iam*p)
y(i) = ' '
enddo
call MPI_scatter(x,1,MPI_CHARACTER, ! send buf,count,type
& y,1,MPI_CHARACTER, ! recv buf,count,type
& root, ! data origin
& MPI_COMM_WORLD,ierr) ! comm,flag
write(*,"('MPI_scatter:',t20,i2,4(3x,a1),t40,4(3x,a1))")Iam,x,y
call MPI_Barrier(MPI_COMM_WORLD,ierr)
c**Perform an all-to-all operation
do i=0,p-1
x(i) = alphabets(i+Iam*p)
y(i) = ' '
enddo
call MPI_Alltoall(x,1,MPI_CHARACTER, ! send buf,count,type
& y,1,MPI_CHARACTER, ! recv buf,count,type
& MPI_COMM_WORLD,ierr) ! comm,flag
write(*,"('MPI_Alltoall:',t20,i2,4(3x,a1),t40,4(3x,a1))")Iam,x,y
call MPI_Barrier(MPI_COMM_WORLD,ierr)
c**Performs a broadcast operation
a = ' '
do i=0,p-1
y(i) = ' '
enddo
if(Iam .eq. root) then
a = 'b'
y(0) = a
endif
call MPI_Bcast(a,1,MPI_CHARACTER, ! buf,count,type
& root,MPI_COMM_WORLD,ierr) ! root,comm,flag
write(*,"('MPI_Bcast:',t20,i2,4(3x,a1),t40,4(3x,a1))")Iam,y,a
call MPI_Finalize(ierr) !! let MPI finish up ...
end
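
The full C version referred to above is not reproduced on this page. As a rough guide only, a C sketch of the gather and scatter steps of the same demo might look like the following; it assumes exactly four processes and root = 1, as the Fortran program does:

#include <stdio.h>
#include <mpi.h>

#define P 4                              /* the demo assumes four processes */

int main(int argc, char *argv[])
{
    const char alphabets[16] = {'a','b','c','d','e','f','g','h',
                                'i','j','k','l','m','n','o','p'};
    char a, x[P], y[P];
    int  i, rank, nproc, root = 1;       /* process 1 is the root, as above */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    if (nproc != P) {                    /* layout of the demo requires 4 ranks */
        if (rank == 0) fprintf(stderr, "run this sketch with %d processes\n", P);
        MPI_Finalize();
        return 1;
    }

    /* Gather: every process sends one letter; root (process 1) receives a,b,c,d */
    a = alphabets[rank];
    MPI_Gather(&a, 1, MPI_CHAR, y, 1, MPI_CHAR, root, MPI_COMM_WORLD);
    if (rank == root)
        printf("MPI_Gather  at root: %c %c %c %c\n", y[0], y[1], y[2], y[3]);

    /* Scatter: the root hands one letter of its send buffer to each process */
    for (i = 0; i < P; i++) x[i] = alphabets[i + rank*P];
    MPI_Scatter(x, 1, MPI_CHAR, &a, 1, MPI_CHAR, root, MPI_COMM_WORLD);
    printf("MPI_Scatter on process %d: %c\n", rank, a);

    MPI_Finalize();
    return 0;
}

As with the Fortran version, the receive buffer is significant only at the root after MPI_Gather, while MPI_Scatter delivers one element to every process. Both programs are meant to be launched on four processes with the MPI launcher of your installation (commonly mpirun -np 4, though the exact command name varies).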

