Question
Write a parallel program with MPI that supports the following computation.
1) It generates five processes: P0, P1, P2, P3, and P4.
2) The main process gets a number n from the keyboard, then initializes MPI.
3) Process Pi (i = 0, 1, 2, 3) uses n to call the following two functions.
a) Function prime(int n) finds the smallest prime number q such that q = 8m + (2i+1) > n for some integer m. Note that a prime number p is an integer that is not a product of two integers smaller than p.
b) Function twin(int n) finds the least twin (q, q+2) such that q = 8m + (2i+1) > n for some integer m. A pair (q, q+2) is a twin if both q and q+2 are prime numbers.
4) P4 gets all four results from the other four processes and returns the least prime number and the least twin.
For example, if n=10, P0 returns 11 and (11,13), P1 returns 13 and (17, 19), P2 returns 17 and (17, 19), and P3 returns 19 and (29,31). Finally, P4 returns 11 and (11,13).
All five processes share the same program.
Your complete C++ code for this project, with a summary, in a Word document; please keep the code readable.
Screenshots of test runs.
Please, can anyone help me with this program? It's urgent.
Explanation / Answer
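Before the general MPI background below (block distribution, the para_range routine, and a reduction example), here is a minimal, non-authoritative sketch of the two helper functions described in step 3, assuming a literal reading of the condition q = 8m + (2i+1) > n. The names is_prime, smallest_prime_in_class, and smallest_twin_in_class, as well as the explicit rank parameter i, are placeholders introduced for illustration; the assignment itself only fixes the names prime() and twin().

#include <utility>

// Trial-division primality test: true when p >= 2 and p is not a product
// of two integers smaller than p.
bool is_prime(long p) {
    if (p < 2) return false;
    for (long d = 2; d * d <= p; d++)
        if (p % d == 0) return false;
    return true;
}

// Smallest prime q of the form q = 8m + (2i+1) with q > n, for rank i.
long smallest_prime_in_class(long n, int i) {
    for (long q = 2 * i + 1; ; q += 8)      // walk the residue class 2i+1 (mod 8)
        if (q > n && is_prime(q)) return q;
}

// Smallest twin (q, q+2) with q = 8m + (2i+1), q > n, and q, q+2 both prime.
std::pair<long, long> smallest_twin_in_class(long n, int i) {
    for (long q = 2 * i + 1; ; q += 8)
        if (q > n && is_prime(q) && is_prime(q + 2)) return {q, q + 2};
}

For step 4, each of P0..P3 would send its prime and its twin pair to P4 (for example, three integers with MPI_Send), and P4 would MPI_Recv them from each rank and keep the smallest prime and the smallest twin; the MPI_Reduce call in the summation example later in this answer is the collective analogue of that gather-and-combine step.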
Block distribution:
  p → number of processors
  n → number of iterations
  n = p × q + r, where q is the quotient and r is the remainder

Example: n = 14, p = 4, q = 3, r = 2.
Processors 0 .. r-1 are assigned q+1 iterations each; the rest are assigned q iterations each, so that
  n = r(q + 1) + (p - r)q
Iteration:  1  2  3  4  5  6  7  8  9 10 11 12 13 14
Rank:       0  0  0  0  1  1  1  1  2  2  2  3  3  3
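The same assignment can be reproduced directly from the q/r rule. The following short standalone snippet (variable names are mine, chosen only for this illustration) prints the iteration range of each rank for the n = 14, p = 4 example above:

#include <iostream>

int main() {
    int n = 14, p = 4;            // iterations and processors from the example
    int q = n / p;                // quotient:  3
    int r = n % p;                // remainder: 2
    int start = 1;
    for (int rank = 0; rank < p; rank++) {
        // Ranks 0..r-1 get q+1 iterations, ranks r..p-1 get q iterations.
        int count = (rank < r) ? q + 1 : q;
        std::cout << "rank " << rank << ": iterations "
                  << start << " .. " << start + count - 1 << "\n";
        start += count;
    }
    return 0;
}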
The para_range subroutine: computes the iteration range for each MPI process.

Fortran implementation:

subroutine para_range(n1, n2, nprocs, irank, ista, iend)
  integer(4) :: n1               ! Lowest value of iteration variable
  integer(4) :: n2               ! Highest value of iteration variable
  integer(4) :: nprocs           ! Number of cores (MPI processes)
  integer(4) :: irank            ! Rank of this process (iproc)
  integer(4) :: ista             ! Start of iterations for rank iproc
  integer(4) :: iend             ! End of iterations for rank iproc
  integer(4) :: iwork1, iwork2   ! base chunk size and leftover iterations
  iwork1 = ( n2 - n1 + 1 ) / nprocs
  iwork2 = MOD( n2 - n1 + 1, nprocs )
  ista = irank * iwork1 + n1 + MIN(irank, iwork2)
  iend = ista + iwork1 - 1
  if ( iwork2 > irank ) iend = iend + 1
  return
end subroutine para_range
The para_range subroutine, continued: computes the iteration range for each MPI process.

C/C++ implementation:

#include <algorithm>   // for std::min

void para_range(int n1, int n2, int &nprocs, int &irank,
                int &ista, int &iend){
  int iwork1;   // base chunk size
  int iwork2;   // leftover iterations spread over the first ranks
  iwork1 = ( n2 - n1 + 1 ) / nprocs;
  iwork2 = ( n2 - n1 + 1 ) % nprocs;
  ista = irank * iwork1 + n1 + std::min(irank, iwork2);
  iend = ista + iwork1 - 1;
  if ( iwork2 > irank ) iend = iend + 1;
}
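As a quick check that the C/C++ version reproduces the table on the earlier slide, it can be driven for every rank in an ordinary serial loop. This driver is only an illustration; in an MPI program each rank calls para_range once with its own rank:

#include <iostream>

// Assumed to be the para_range definition shown above (same file or a header).
void para_range(int n1, int n2, int &nprocs, int &irank, int &ista, int &iend);

int main() {
    int p = 4, n = 14;
    for (int rank = 0; rank < p; rank++) {
        int ista, iend;
        para_range(1, n, p, rank, ista, iend);
        std::cout << "rank " << rank << ": " << ista << " .. " << iend << "\n";
    }
    return 0;
}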
Simple example: sum up the elements of an array (serial code), in C++ and in Fortran.

C++:
#include <iostream>
using namespace std;

int main(){
  int i;
  const int n = 1000;
  int a[n];
  int sum;
  for ( i = 1; i <= n; i++ ){
    a[i-1] = i;          // a holds 1, 2, ..., n
  }
  sum = 0;
  for ( i = 1; i <= n; i++ ){
    sum = sum + a[i-1];
  }
  cout << "sum = " << sum << endl;
  return 0;
}
Fortran:

program main
  implicit none
  integer(4) :: i, sum
  integer(4), parameter :: n = 1000
  integer(4) :: a(n)
  do i = 1, n
     a(i) = i
  end do
  sum = 0
  do i = 1, n
     sum = sum + a(i)
  end do
  write(6,*) 'sum =', sum
end program main
Sum up elements of an array (parallel code, Fortran):

program main
  implicit none
  include 'mpif.h'
  integer(4), parameter :: n = 1000
  integer(4) :: a(n)
  integer(4) :: i, ista, iend, sum, ssum, ierr, iproc, nproc
  call MPI_INIT(ierr)
  call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierr)
  call MPI_COMM_RANK(MPI_COMM_WORLD, iproc, ierr)
  call para_range(1, n, nproc, iproc, ista, iend)
  do i = ista, iend          ! each rank fills only its own block
     a(i) = i
  end do
  sum = 0
  do i = ista, iend          ! partial sum over the local block
     sum = sum + a(i)
  end do
  call MPI_REDUCE(sum, ssum, 1, MPI_INTEGER, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
  sum = ssum
  if ( iproc == 0 ) write(6,*) 'sum =', sum
  call MPI_FINALIZE(ierr)
end program main
Sum up elements of an array (parallel code, C++):

#include <iostream>
#include <mpi.h>
using namespace std;

// para_range from the previous slide is assumed to be defined in the same
// file (or declared in a shared header).
void para_range(int n1, int n2, int &nprocs, int &irank, int &ista, int &iend);

int main(int argc, char** argv){
  int i;
  const int n = 1000;
  int a[n];
  int sum, ssum, iproc, nproc, ista, iend;
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &iproc);
  MPI_Comm_size(MPI_COMM_WORLD, &nproc);
  para_range(1, n, nproc, iproc, ista, iend);
  for ( i = ista; i <= iend; i++ ){   // each rank fills only its own block
    a[i-1] = i;
  }
  sum = 0;
  for ( i = ista; i <= iend; i++ ){   // partial sum over the local block
    sum = sum + a[i-1];
  }
  MPI_Reduce(&sum, &ssum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);  // MPI_INT (not MPI_INTEGER) for a C int
  sum = ssum;
  if ( iproc == 0 ){
    cout << "sum = " << sum << endl;
  }
  MPI_Finalize();
  return 0;
}
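A note on building this (the file and executable names here are placeholders): para_range from the earlier slide must be compiled into the same program, for example by placing both in one source file; with a typical MPI installation the program can then be built with the compiler wrapper (e.g. mpicxx sum.cpp -o sum) and launched on several processes with mpirun -np 4 ./sum.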
Collective communication:

Collective communication allows you to exchange data among a group of processes. The communicator argument in the collective communication subroutine calls specifies which processes are involved in the communication.

[Figure: the MPI_COMM_WORLD communicator containing processes 0, 1, 2, 3, ..., n-1]
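To make the role of the communicator argument concrete, here is a minimal sketch (independent of the sum example; the value 42 and the variable names are arbitrary choices for illustration) in which rank 0 broadcasts an integer to every process in MPI_COMM_WORLD:

#include <iostream>
#include <mpi.h>

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Rank 0 picks a value; MPI_Bcast delivers it to every process in the
    // communicator passed as the last argument (here MPI_COMM_WORLD).
    int value = (rank == 0) ? 42 : 0;
    MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD);

    std::cout << "rank " << rank << " has value " << value << std::endl;
    MPI_Finalize();
    return 0;
}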