/*
Codes Here:
// parallel code to generate prime numbers between 1 to N using OpenMP //
// find prime numbers (serial implementation) //
// linear search //
// dot product //
// parallel program to study the performance of the parallel for using OpenMP, compared with serial code //
// simple OpenMP program employing a 'reduction' clause to express the reduction of a for loop //
// OpenMP program using the schedule(dynamic, chunk-size) clause to give the for loop the dynamic scheduling type //
// MPI program to perform binary search //
// program to find the biggest and smallest of three numbers on two different cores //
// program to perform matrix multiplication (OpenMP) //
// MPI program to find the sum of an array //
// MPI program to find the integral of sin(X) over the range 0 to 2*pi, using MPI_Bcast to send information to each participating process and MPI_Reduce to get a grand total of the areas computed by each participating process //
// MPI program for matrix multiplication //
*/
// parallel code to generate prime numbers between 1 to N using OpenMP //
#include <stdio.h>
#include <omp.h>

int main()
{
    int prime[1000], i, j, n;
    printf("\nIn order to find prime numbers from 1 to n, enter the value of n (max 999): ");
    scanf("%d", &n);
    /* mark every number as prime to begin with */
    for (i = 1; i <= n; i++)
    {
        prime[i] = 1;
    }
    prime[1] = 0;
    for (i = 2; i * i <= n; i++)
    {
        /* multi-threading to remove multiples of prime number i from the list (array);
           the loop variable j is implicitly private to each thread */
        #pragma omp parallel for
        for (j = i * i; j <= n; j = j + i)
        {
            if (prime[j] == 1)
                prime[j] = 0;
        }
    }
    printf("\nPrime numbers from 1 to %d are\n", n);
    for (i = 2; i <= n; i++)
    {
        if (prime[i] == 1)
        {
            printf("%d\t", i);
        }
    }
    printf("\n");
    return 0;
}
//Find prime numbers (serial Implementation)//
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main()
{
    double time_spent;
    clock_t begin = clock();
    int num, i, count, n;
    printf("Enter max range: ");
    scanf("%d", &n);
    for (num = 1; num <= n; num++) {
        count = 0;
        for (i = 2; i <= num / 2; i++) {
            if (num % i == 0) {
                count++;
                break;
            }
        }
        if (count == 0 && num != 1)
            printf("%d \t", num);
    }
    printf("\n");
    clock_t end = clock();
    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("\nTime elapsed: %f\n", time_spent);
    return 0;
}
//linear search//
#include <stdio.h>
#include <omp.h>
#define MAXTHREADS 10
#define ARRAYSIZE 42   /* the initializer below has 42 elements */

int main(void)
{
    int a[] = {3,5,1,2,34,67,90,43,53,3,4,26,34,35,54,67,87,21,34,56,33,45,12,34,5,6,7,8,123,45,32,455,666,444,333,222,11,22,44,55,333,222};
    int i, j, found = 0, key = 222;
    double start_time, run_time;
    for (j = 1; j <= 5; j++)
    {
        omp_set_num_threads(j);
        found = 0;
        start_time = omp_get_wtime();
        #pragma omp parallel private(i)
        {
            int start, noofsteps;
            #pragma omp single
            printf("num of threads in action: %d\n", j);
            if (found == 0)
            {
                /* each thread scans its own contiguous chunk of the array;
                   the last thread also takes the leftover elements */
                start = omp_get_thread_num() * (ARRAYSIZE / omp_get_num_threads());
                noofsteps = start + (ARRAYSIZE / omp_get_num_threads());
                if (omp_get_thread_num() == omp_get_num_threads() - 1)
                    noofsteps += (ARRAYSIZE % j);
                for (i = start; i < noofsteps; i++)
                    if (key == a[i]) {
                        printf("Key has been found by thread %d at position %d\n", omp_get_thread_num(), i + 1);
                        found = 1;
                        break;
                    }
            }
        }
        run_time = omp_get_wtime() - start_time;
        printf("\n %f seconds %d threads \n ", run_time, j);
    }
    return 0;
}
//dotProd//
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#define VECTOR_LENGTH 1000

double dot_product(int N, double *A, double *B)
{
    double dot = 0.0, *a, *b;
    int n, i, nthreads, tid;
    /* all clauses must be on a single pragma line (or continued with a backslash) */
    #pragma omp parallel default(none) reduction(+:dot) shared(N,A,B) private(n,i,nthreads,tid,a,b)
    {
        nthreads = omp_get_num_threads();
        tid = omp_get_thread_num();
        n = N / nthreads;      /* minimum iterations for every thread */
        a = A + n * tid;       /* pointers to this thread's           */
        b = B + n * tid;       /* chunks of A and B                   */
        if (tid == nthreads - 1)
            n += N - n * nthreads;   /* last thread picks up the remainder */
        dot = a[0] * b[0];
        for (i = 1; i < n; i++)
            dot += a[i] * b[i];
    }
    return dot;
}

int main(int argc, char *argv[])
{
    int i;
    double vec_A[VECTOR_LENGTH], vec_B[VECTOR_LENGTH], sum;
    for (i = 0; i < VECTOR_LENGTH; i++)
        vec_A[i] = vec_B[i] = 1.0 * i;
    sum = dot_product(VECTOR_LENGTH, vec_A, vec_B);
    printf("Sum value: %.8f.\n", sum);
    system("PAUSE");   /* Windows-specific pause; harmless to remove on other platforms */
    return 0;
}
//Implement a parallel program to study the performance of the parallel for using OpenMP and compare the result with serial code
//serial
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main()
{
    double time_spent;
    clock_t begin = clock();
    int num, i, count, n;
    printf("Enter max range: ");
    scanf("%d", &n);
    for (num = 1; num <= n; num++) {
        count = 0;
        for (i = 2; i <= num / 2; i++) {
            if (num % i == 0) {
                count++;
                break;
            }
        }
        if (count == 0 && num != 1)
            printf("%d \t", num);
    }
    printf("\n");
    clock_t end = clock();
    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("\nTime elapsed: %f\n", time_spent);
    return 0;
}
//parallel
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <unistd.h>

int main()
{
    double time_spent;
    clock_t begin = clock();
    int prime[1000], i, j, n;
    printf("\nIn order to find prime numbers from 1 to n, enter the value of n: ");
    scanf("%d", &n);
    for (i = 1; i <= n; i++) {
        prime[i] = 1;
    }
    prime[1] = 0;
    for (i = 2; i * i <= n; i++) {
        #pragma omp parallel for
        for (j = i * i; j <= n; j = j + i) {
            if (prime[j] == 1)
                prime[j] = 0;
        }
    }
    printf("\nPrime numbers from 1 to %d are\n", n);
    for (i = 2; i <= n; i++) {
        if (prime[i] == 1) {
            printf("%d\t", i);
        }
    }
    printf("\n");
    clock_t end = clock();
    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("\nTime elapsed: %f\n", time_spent);
    return 0;
}
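/* Note: clock() reports CPU time summed over all threads, so it can overstate the
   wall-clock time of the parallel version. The fragment below is a minimal sketch
   (not part of the original listing) showing how the sieve loop could instead be
   timed with omp_get_wtime(), which returns elapsed wall-clock seconds; n is fixed
   here only for the timing demo. */
#include <stdio.h>
#include <omp.h>

int main()
{
    int prime[1000], i, j, n = 999;            /* fixed n, chosen for the demo */
    for (i = 1; i <= n; i++) prime[i] = 1;
    prime[1] = 0;
    double start = omp_get_wtime();            /* wall-clock start */
    for (i = 2; i * i <= n; i++) {
        #pragma omp parallel for
        for (j = i * i; j <= n; j = j + i)
            prime[j] = 0;
    }
    double elapsed = omp_get_wtime() - start;  /* elapsed wall-clock seconds */
    printf("Sieve wall-clock time: %f seconds\n", elapsed);
    return 0;
}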
//Write a simple OpenMP program to employ a 'reduction' clause to express the reduction of a for loop
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main()
{
    int sum = 0;
    int lsum = 0;
    int A[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    #pragma omp parallel private(lsum)
    {
        int i;
        lsum = 0;   /* private copies start uninitialized, so reset before accumulating */
        #pragma omp for
        for (i = 0; i < 8; i++)
        {
            lsum = lsum + A[i];
        }
        /* combine the per-thread partial sums by hand */
        #pragma omp critical
        {
            sum += lsum;
        }
    }
    printf("sum is:%d\n", sum);
    return 0;
}
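/* The listing above performs the reduction by hand with a private partial sum and a
   critical section. Below is a minimal sketch of the same loop written with the
   reduction clause itself, as the exercise title asks for (same array and result;
   this variant is assumed here rather than taken from the original listing). */
#include <stdio.h>
#include <omp.h>

int main()
{
    int sum = 0;
    int A[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    /* reduction(+:sum) gives each thread a private sum initialized to 0 and
       adds the private copies into the shared sum when the loop finishes */
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < 8; i++)
        sum += A[i];
    printf("sum is:%d\n", sum);   /* prints 36 */
    return 0;
}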
//Write an OpenMP program to specify that the schedule(dynamic, chunk-size) clause of the loop construct specifies that the for loop has the dynamic scheduling type.
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define CHUNKSIZE 10
#define N 100

int main(int argc, char *argv[])
{
    int nthreads, tid, i, chunk;
    float a[N], b[N], c[N];
    for (i = 0; i < N; i++)
        a[i] = b[i] = i * 1.0;
    chunk = CHUNKSIZE;
    #pragma omp parallel shared(a,b,c,nthreads,chunk) private(i,tid)
    {
        tid = omp_get_thread_num();
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
        printf("Thread %d starting...\n", tid);
        /* iterations are handed out in chunks of CHUNKSIZE as threads become free */
        #pragma omp for schedule(dynamic,chunk)
        for (i = 0; i < N; i++)
        {
            c[i] = a[i] + b[i];
            printf("Thread %d: c[%d]= %f\n", tid, i, c[i]);
        }
    }
    return 0;
}
// MPI Program to perform binary search. //
#include <stdio.h>
#include <time.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    clock_t tic = clock();
    int rank, size;
    int a[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
    int b[10];
    int search = 6, flag = 0;
    int i;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    /* split the 10-element array into two 5-element halves; intended to be run
       with 2 processes, each of which scans its own half for the key */
    MPI_Scatter(&a, 5, MPI_INT, &b, 5, MPI_INT, 0, MPI_COMM_WORLD);
    if (rank == 0)
    {
        for (i = 0; i < 5; i++)
        {
            if (b[i] == search)
            {
                printf("\nNumber Found!\t\t%d\t\t%d", rank, i);
                flag = 1;
            }
            printf("\n%d\t\t%d", b[i], rank);
        }
    }
    if (rank == 1)
    {
        for (i = 0; i < 5; i++)
        {
            if (b[i] == search)
            {
                printf("\nNumber Found!\t\t%d\t\t%d", rank, i);
                flag = 1;
            }
            printf("\n%d\t\t%d", b[i], rank);
        }
    }
    printf("\n");
    MPI_Finalize();
    return 0;
}
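/* The scattered halves are already sorted, so each process could run a true binary
   search on its own chunk instead of the linear scan above. A minimal sketch of that
   variant (assumed here, not part of the original listing; again intended to be
   launched with 2 processes): */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, a[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
    int b[5], search = 6;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Scatter(a, 5, MPI_INT, b, 5, MPI_INT, 0, MPI_COMM_WORLD);
    /* classic binary search over the local 5-element chunk */
    int lo = 0, hi = 4, found_at = -1;
    while (lo <= hi) {
        int mid = (lo + hi) / 2;
        if (b[mid] == search)     { found_at = mid; break; }
        else if (b[mid] < search) lo = mid + 1;
        else                      hi = mid - 1;
    }
    if (found_at >= 0)
        printf("Rank %d found %d at local index %d\n", rank, search, found_at);
    MPI_Finalize();
    return 0;
}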
//Program to find biggest and smallest of three numbers in two different cores//
#include <stdio.h>
#include <omp.h>
#include <time.h>

int main()
{
    int a = 50;
    int b = 100;
    int c = 150;
    clock_t start_clock = clock();
    omp_set_num_threads(2);   /* one thread finds the biggest, the other the smallest */
    #pragma omp parallel
    {
        int id = omp_get_thread_num();
        if (id == 0)
        {
            if (a > b && a > c)
                printf("Biggest number is %d\n", a);
            else if (b > a && b > c)
                printf("Biggest number is %d\n", b);
            else
                printf("Biggest number is %d\n", c);
        }
        else
        {
            if (a < b && a < c)
                printf("Smallest number is %d\n", a);
            else if (b < a && b < c)
                printf("Smallest number is %d\n", b);
            else
                printf("Smallest number is %d\n", c);
        }
    }
    clock_t end_clock = clock();
    printf("\nProgram Execution Time : %f seconds\n\n", (double)(end_clock - start_clock) / CLOCKS_PER_SEC);
    return 0;
}
//Program to perform matrix multiplication(omp prog)//
#include <stdio.h>
#include <omp.h>
#include <time.h>

int main()
{
    int i = 0, j = 0, k = 0;
    int a[2][2] = {{1, 2}, {3, 4}};
    int b[2][2] = {{1, 2}, {3, 4}};
    int c[2][2] = {{0, 0}, {0, 0}};
    clock_t start_clock = clock();
    /* j and k must be private, otherwise the threads race on the inner loop counters */
    #pragma omp parallel for private(j,k)
    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            for (k = 0; k < 2; k++)
            {
                c[i][j] = c[i][j] + a[i][k] * b[k][j];
            }
        }
    }
    clock_t end_clock = clock();
    printf("\nProgram Execution Time : %f seconds\n\n", (double)(end_clock - start_clock) / CLOCKS_PER_SEC);
    printf("Resultant Matrix\n");
    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            printf("C[%d][%d]=%d\n", i, j, c[i][j]);
        }
    }
    return 0;
}
//MPI program to find sum of an array//
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define ARRAYSIZE 20000000
#define MASTER 0

double data[ARRAYSIZE];

int main(int argc, char *argv[])
{
    int numtasks, taskid, dest, offset, i, j, tag1, tag2, source, chunksize, leftover;
    double mysum, sum;
    double update(int myoffset, int chunk, int myid);
    MPI_Status status;

    /***** Initializations *****/
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    printf("MPI task %d has started...\n", taskid);
    chunksize = (ARRAYSIZE / numtasks);
    leftover = (ARRAYSIZE % numtasks);
    tag2 = 1;
    tag1 = 2;

    /***** Master task only *****/
    if (taskid == MASTER) {
        /* Initialize the array */
        sum = 0;
        for (i = 0; i < ARRAYSIZE; i++) {
            data[i] = i * 1.0;
            sum = sum + data[i];
        }
        printf("Initialized array sum = %e\n", sum);
        printf("numtasks= %d chunksize= %d leftover= %d\n", numtasks, chunksize, leftover);

        /* Send each task its portion of the array - master keeps the 1st part plus the leftover elements */
        offset = chunksize + leftover;
        for (dest = 1; dest < numtasks; dest++) {
            MPI_Send(&offset, 1, MPI_INT, dest, tag1, MPI_COMM_WORLD);
            MPI_Send(&data[offset], chunksize, MPI_DOUBLE, dest, tag2, MPI_COMM_WORLD);
            printf("Sent %d elements to task %d offset= %d\n", chunksize, dest, offset);
            offset = offset + chunksize;
        }

        /* Master does its part of the work */
        offset = 0;
        mysum = update(offset, chunksize + leftover, taskid);

        /* Wait to receive results from each task */
        for (i = 1; i < numtasks; i++) {
            source = i;
            MPI_Recv(&offset, 1, MPI_INT, source, tag1, MPI_COMM_WORLD, &status);
            MPI_Recv(&data[offset], chunksize, MPI_DOUBLE, source, tag2, MPI_COMM_WORLD, &status);
        }

        /* Get final sum and print sample results */
        MPI_Reduce(&mysum, &sum, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
        printf("Sample results: \n");
        offset = 0;
        for (i = 0; i < numtasks; i++) {
            for (j = 0; j < 5; j++)
                printf(" %e", data[offset + j]);
            printf("\n");
            offset = offset + chunksize;
        }
        printf("*** Final sum= %e ***\n", sum);
    } /* end of master section */

    /***** Non-master tasks only *****/
    if (taskid > MASTER) {
        /* Receive my portion of the array from the master task */
        source = MASTER;
        MPI_Recv(&offset, 1, MPI_INT, source, tag1, MPI_COMM_WORLD, &status);
        MPI_Recv(&data[offset], chunksize, MPI_DOUBLE, source, tag2, MPI_COMM_WORLD, &status);

        /* Do my part of the work */
        mysum = update(offset, chunksize, taskid);

        /* Send my results back to the master task */
        dest = MASTER;
        MPI_Send(&offset, 1, MPI_INT, dest, tag1, MPI_COMM_WORLD);
        MPI_Send(&data[offset], chunksize, MPI_DOUBLE, MASTER, tag2, MPI_COMM_WORLD);

        /* Use the sum reduction operation to obtain the final sum */
        MPI_Reduce(&mysum, &sum, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
    } /* end of non-master */

    MPI_Finalize();
} /* end of main */

double update(int myoffset, int chunk, int myid)
{
    int i;
    double mysum;
    /* Perform an addition to each of my array elements and keep my sum */
    mysum = 0;
    for (i = myoffset; i < myoffset + chunk; i++) {
        data[i] = data[i] + (i * 1.0);
        mysum = mysum + data[i];
    }
    printf("Task %d mysum = %e\n", myid, mysum);
    return (mysum);
}
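/* The send/receive bookkeeping above can also be expressed with collectives alone.
   A minimal sketch (assumed, not from the original listing) that sums an array with
   MPI_Scatter and MPI_Reduce; it drops the leftover handling by requiring the array
   size to divide evenly among the processes. SMALLSIZE is a hypothetical size chosen
   for the sketch. */
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#define SMALLSIZE 1000000

int main(int argc, char *argv[])
{
    int numtasks, taskid, i, chunksize;
    double sum = 0.0, mysum = 0.0;
    static double data[SMALLSIZE];
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    chunksize = SMALLSIZE / numtasks;            /* assumes SMALLSIZE % numtasks == 0 */
    double *chunk = malloc(chunksize * sizeof(double));
    if (taskid == 0)
        for (i = 0; i < SMALLSIZE; i++)
            data[i] = i * 1.0;
    /* hand each task its contiguous chunk, sum it locally, then reduce to rank 0 */
    MPI_Scatter(data, chunksize, MPI_DOUBLE, chunk, chunksize, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    for (i = 0; i < chunksize; i++)
        mysum += chunk[i];
    MPI_Reduce(&mysum, &sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if (taskid == 0)
        printf("Final sum = %e\n", sum);
    free(chunk);
    MPI_Finalize();
    return 0;
}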
//MPI program for matrix multiplication//
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define NRA 62          /* number of rows in matrix A */
#define NCA 15          /* number of columns in matrix A */
#define NCB 7           /* number of columns in matrix B */
#define MASTER 0        /* taskid of first task */
#define FROM_MASTER 1   /* setting a message type */
#define FROM_WORKER 2   /* setting a message type */

int main(int argc, char *argv[])
{
    int numtasks,              /* number of tasks in partition */
        taskid,                /* a task identifier */
        numworkers,            /* number of worker tasks */
        source,                /* task id of message source */
        dest,                  /* task id of message destination */
        mtype,                 /* message type */
        rows,                  /* rows of matrix A sent to each worker */
        averow, extra, offset, /* used to determine rows sent to each worker */
        i, j, k, rc;           /* misc */
    double a[NRA][NCA],        /* matrix A to be multiplied */
           b[NCA][NCB],        /* matrix B to be multiplied */
           c[NRA][NCB];        /* result matrix C */
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    if (numtasks < 2) {
        printf("Need at least two MPI tasks. Quitting...\n");
        MPI_Abort(MPI_COMM_WORLD, rc);
        exit(1);
    }
    numworkers = numtasks - 1;

    /**************************** master task ************************************/
    if (taskid == MASTER)
    {
        printf("mpi_mm has started with %d tasks.\n", numtasks);
        printf("Initializing arrays...\n");
        for (i = 0; i < NRA; i++)
            for (j = 0; j < NCA; j++)
                a[i][j] = i + j;
        for (i = 0; i < NCA; i++)
            for (j = 0; j < NCB; j++)
                b[i][j] = i * j;

        /* Send matrix data to the worker tasks */
        averow = NRA / numworkers;
        extra = NRA % numworkers;
        offset = 0;
        mtype = FROM_MASTER;
        for (dest = 1; dest <= numworkers; dest++)
        {
            rows = (dest <= extra) ? averow + 1 : averow;
            printf("Sending %d rows to task %d offset=%d\n", rows, dest, offset);
            MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&a[offset][0], rows * NCA, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&b, NCA * NCB, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
            offset = offset + rows;
        }

        /* Receive results from worker tasks */
        mtype = FROM_WORKER;
        for (i = 1; i <= numworkers; i++)
        {
            source = i;
            MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
            MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
            MPI_Recv(&c[offset][0], rows * NCB, MPI_DOUBLE, source, mtype, MPI_COMM_WORLD, &status);
            printf("Received results from task %d\n", source);
        }

        /* Print results */
        printf("******************************************************\n");
        printf("Result Matrix:\n");
        for (i = 0; i < NRA; i++)
        {
            printf("\n");
            for (j = 0; j < NCB; j++)
                printf("%6.2f ", c[i][j]);
        }
        printf("\n******************************************************\n");
        printf("Done.\n");
    }

    /**************************** worker task ************************************/
    if (taskid > MASTER)
    {
        mtype = FROM_MASTER;
        MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&a, rows * NCA, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&b, NCA * NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);

        for (k = 0; k < NCB; k++)
            for (i = 0; i < rows; i++)
            {
                c[i][k] = 0.0;
                for (j = 0; j < NCA; j++)
                    c[i][k] = c[i][k] + a[i][j] * b[j][k];
            }

        mtype = FROM_WORKER;
        MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
        MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
        MPI_Send(&c, rows * NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD);
    }
    MPI_Finalize();
}
//MPI program to find integration of function sin(X) over the range 0 to 2*pi. MPI_Bcast sends information to each participating process and MPI_Reduce gets a grand total of the areas computed by each participating process.//
#include <stdio.h>
#include <math.h>
#include <mpi.h>
#define PI 3.1415926535

int main(int argc, char **argv)
{
    int my_id, root_process, num_procs, ierr, num_intervals, i;
    double rect_width, area, sum, x_middle, partial_sum;
    MPI_Status status;

    /* Let process 0 be the root process. */
    root_process = 0;

    /* Now replicate this process to create parallel processes. */
    ierr = MPI_Init(&argc, &argv);

    /* Find out MY process ID, and how many processes were started. */
    ierr = MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
    ierr = MPI_Comm_size(MPI_COMM_WORLD, &num_procs);

    if (my_id == root_process) {
        /* I must be the root process, so I will query the user
           to determine how many interpolation intervals to use. */
        printf("Please enter the number of intervals to interpolate: ");
        scanf("%i", &num_intervals);
    }

    /* Then...no matter which process I am:
     * I engage in a broadcast so that the number of intervals is
     * sent from the root process to the other processes, and ... */
    ierr = MPI_Bcast(&num_intervals, 1, MPI_INT, root_process, MPI_COMM_WORLD);

    /* calculate the width of a rectangle
       (note: PI / num_intervals covers [0, PI]; use 2 * PI / num_intervals for the full [0, 2*PI] range), and */
    rect_width = PI / num_intervals;

    /* then calculate the sum of the areas of the rectangles for
     * which I am responsible. Start with the (my_id + 1)th
     * interval and process every num_procs-th interval thereafter. */
    partial_sum = 0;
    for (i = my_id + 1; i < num_intervals + 1; i += num_procs) {
        /* Find the middle of the interval on the X-axis. */
        x_middle = (i - 0.5) * rect_width;
        area = sin(x_middle) * rect_width;
        partial_sum = partial_sum + area;
    }
    printf("proc %i computes: %f\n", my_id, (float)partial_sum);

    /* and finally, engage in a reduction in which all partial sums
     * are combined, and the grand sum appears in variable "sum" in
     * the root process, */
    ierr = MPI_Reduce(&partial_sum, &sum, 1, MPI_DOUBLE, MPI_SUM, root_process, MPI_COMM_WORLD);

    /* and, if I am the root process, print the result. */
    if (my_id == root_process) {
        printf("The integral is %f\n", (float)sum);
        /* (yes, we could have summed just the heights, and
         * postponed the multiplication by rect_width til now.) */
    }

    /* Close down this process. */
    ierr = MPI_Finalize();
    return 0;
}