- /*
- Codes Here:
- // parallel code to generate prime numbers between 1 to N using openMP.//
- //Find prime numbers (serial Implementation)//
- //linear search//
- //dotProd//
- //Implement a parallel program to study the performance of the parallel for using OpenMP and compare the result with serial code
- //Write a simple OpenMP program to employ a ‘reduction’ clause to express the reduction of a for loop
- //Write an OpenMP program to specify that the schedule(dynamic, chunk-size) clause of the loop construct specifies that the for loop has the dynamic scheduling type.
- // MPI Program to perform binary search. //
- //Program to find biggest and smallest of three numbers in two different cores//
- //Program to perform matrix multiplication(omp prog)//
- //MPI program to find sum of an array//
- //. MPI program to find integration of function sin(X) over the range 0 to 2 pi MPI_Bcast to send information to each participating process and MPI_Reduce to get a grand total of the areas computed by each participating process.//
- //MPI program for matrix multiplication//
- */
- // parallel code to generate prime numbers between 1 to N using openMP.//
- #include<stdio.h>
- #include<omp.h>
/*
 * Sieve of Eratosthenes, multiples crossed off in parallel with OpenMP.
 * prime[i] ends up 1 iff i is prime.  Reads the upper bound n from stdin;
 * n is validated and clamped to the capacity of prime[].
 */
int main(void)
{
    int prime[1000], i, j, n;

    printf("\nIn order to find prime numbers from 1 to n, enter the value of n:");
    if (scanf("%d", &n) != 1 || n < 1)
        return 1;      /* reject missing or non-positive input */
    if (n > 999)
        n = 999;       /* prime[] only holds indices 0..999; original overflowed */

    for (i = 1; i <= n; i++)
        prime[i] = 1;  /* assume prime until crossed off */
    prime[1] = 0;      /* 1 is not prime */

    for (i = 2; i * i <= n; i++)
    {
        /* cross off multiples of i; iterations are independent, and the
         * loop variable j is predetermined private in OpenMP */
        #pragma omp parallel for
        for (j = i * i; j <= n; j = j + i)
        {
            if (prime[j] == 1)
                prime[j] = 0;
        }
    }

    printf("\nPrime numbers from 1 to %d are\n", n);
    for (i = 2; i <= n; i++)
    {
        if (prime[i] == 1)
            printf("%d\t ", i);
    }
    printf("\n");
    return 0;
}
- //Find prime numbers (serial Implementation)//
- #include<stdio.h>
- #include<time.h>
- #include<unistd.h>
/* Serial prime listing: prints every prime in [2, n] by trial division,
 * then reports the elapsed CPU time. */
int main(){
    clock_t start = clock();
    int candidate, divisor, limit, is_composite;
    printf("Enter max range: ");
    scanf("%d", &limit);
    for (candidate = 1; candidate <= limit; candidate++) {
        is_composite = 0;
        /* trial division up to candidate/2 is enough to detect a factor */
        for (divisor = 2; divisor <= candidate / 2; divisor++) {
            if (candidate % divisor == 0) {
                is_composite = 1;
                break;
            }
        }
        if (!is_composite && candidate != 1)
            printf("%d \t", candidate);
    }
    printf("\n");
    clock_t finish = clock();
    double elapsed = (double)(finish - start) / CLOCKS_PER_SEC;
    printf("\nTime elapsed: %f ", elapsed);
    return 0;
}
- //linear search//
- #include<stdio.h>
- #include<omp.h>
- #define MAXTHREADS 10
- #define ARRAYSIZE 44
/*
 * Parallel linear search: the array is split into one contiguous chunk per
 * thread and each thread scans its own chunk for 'key'.  Repeated for
 * 1..5 threads with timing.
 */
int main(void)
{
    int a[]={3,5,1,2,34,67,90,43,53,3,4,26,34,35,54,67,87,21,34,56,33,45,12,34,5,6,7,8,123,45,32,455,666,444,333,222,11,22,44,55,333,222};
    /* fixed: the ARRAYSIZE macro (44) over-states the 42-element
     * initializer, so threads read past the end of a[] */
    const int size = (int)(sizeof a / sizeof a[0]);
    int i, j, found = 0, key = 222;
    double start_time, run_time;

    for (j = 1; j <= 5; j++)
    {
        omp_set_num_threads(j);
        found = 0;
        start_time = omp_get_wtime();
        #pragma omp parallel private(i)
        {
            int start, end, nth, tid;
            #pragma omp single
            printf("num of threads in action: %d\n", j);
            if (found == 0)
            {
                nth = omp_get_num_threads();
                tid = omp_get_thread_num();
                start = tid * (size / nth);
                end = start + (size / nth);
                /* fixed: only the LAST thread absorbs the remainder; the
                 * original extended every thread's range, overlapping work */
                if (tid == nth - 1)
                    end = size;
                for (i = start; i < end; i++)
                    if (key == a[i]) {
                        printf("Key has been found in %d thread at %d position\n", tid, i + 1);
                        found = 1;  /* benign race: worst case extra scanning */
                        break;
                    }
            }
        }
        run_time = omp_get_wtime() - start_time;
        printf("\n %f seconds %d threads \n ", run_time, j);
    }
    return 0;
}
- //dotProd//
- #include <stdio.h>
- #include <stdlib.h>
- #include <time.h>
- #include <omp.h>
- #define VECTOR_LENGHT 1000
/*
 * Computes the dot product of the N-element vectors A and B.
 * Work is split into one contiguous chunk per thread; the per-thread
 * partial sums are combined with an OpenMP '+' reduction.  Falls back to
 * a single chunk when not compiled with OpenMP support.
 */
double dot_product (int N, double* A, double* B)
{
    double dot = 0.0, *a, *b;
    int n, i, nthreads, tid;
    /* fixed: the clause list must be ONE logical line — the original broke
     * after reduction(...) without a '\' continuation, a syntax error */
    #pragma omp parallel default(none) reduction(+:dot) \
        shared(N,A,B) private(n,i,nthreads,tid,a,b)
    {
#ifdef _OPENMP
        nthreads = omp_get_num_threads();
        tid = omp_get_thread_num();
#else
        nthreads = 1;            /* serial build: one chunk covers everything */
        tid = 0;
#endif
        n = N / nthreads;        /* minimum iterations for every thread */
        a = A + n * tid;         /* this thread's slice of A */
        b = B + n * tid;         /* this thread's slice of B */
        if (tid == nthreads - 1)
            n += N - n * nthreads;   /* last thread takes the remainder */
        /* fixed: start from 0 and loop from i=0 — the original seeded
         * dot with a[0]*b[0], reading out of range when a chunk is empty */
        for (i = 0; i < n; i++)
            dot += a[i] * b[i];
    }
    return dot;
}
- int main (int argc, char *argv[])
- {
- int i;
- double vec_A[VECTOR_LENGHT], vec_B[VECTOR_LENGHT], sum;
- for (i=0; i<VECTOR_LENGHT; i++)
- vec_A[i] = vec_B[i] = 1.0*i;
- sum = dot_product(VECTOR_LENGHT, vec_A, vec_B);
- printf("Sum value: %.8f.\n", sum);
- system("PAUSE");
- return 0;
- }
- //Implement a parallel program to study the performance of the parallel for using OpenMP and compare the result with serial code
// serial implementation
// (code transcribed from screenshot below)
- #include<stdio.h>
- #include<time.h>
- #include<unistd.h>
/* Serial prime finder used as the baseline for the OpenMP comparison.
 * Prints every prime in [2, n] and the elapsed CPU time. */
int main(){
    double time_spent;
    clock_t begin = clock();   /* fixed: stray 's' after clock() was a syntax error */
    int num, i, count, n;
    printf("Enter max range: ");
    scanf("%d", &n);
    for (num = 1; num <= n; num++) {
        count = 0;
        /* count becomes non-zero as soon as any divisor is found */
        for (i = 2; i <= num / 2; i++) {
            if (num % i == 0) {
                count++;
                break;
            }
        }
        if (count == 0 && num != 1)
            printf("%d \t", num);
    }
    printf("\n");
    clock_t end = clock();
    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("\nTime elapsed: %f ", time_spent);
    return 0;
}
- //parallel
- #include<stdio.h>
- #include<omp.h>
- #include<time.h>
- #include<unistd.h>
/*
 * Timed OpenMP prime sieve used for the serial-vs-parallel comparison.
 * Reads the upper bound n, marks composites in parallel, prints the primes
 * and the elapsed CPU time.
 */
int main(){
    /* fixed: was uninitialized and then updated with '+=' — UB */
    double time_spent = 0.0;
    clock_t begin = clock();
    int prime[1000], i, j, n;
    printf("\nIn order to find prime numbers from 1 to n, enter the value of n:");
    if (scanf("%d", &n) != 1 || n < 1)
        return 1;      /* reject missing or non-positive input */
    if (n > 999)
        n = 999;       /* prime[] only holds indices 0..999 */
    for (i = 1; i <= n; i++) {
        prime[i] = 1;  /* assume prime until crossed off */
    }
    prime[1] = 0;
    for (i = 2; i * i <= n; i++) {
        /* cross off multiples of i in parallel; j is private per the spec */
        #pragma omp parallel for
        for (j = i * i; j <= n; j = j + i) {
            if (prime[j] == 1)
                prime[j] = 0;
        }
    }
    printf("\nPrime numbers from 1 to %d are\n", n);
    for (i = 2; i <= n; i++) {
        if (prime[i] == 1) {
            printf("%d\t ", i);
        }
    }
    printf("\n");
    clock_t end = clock();
    time_spent += (double)(end - begin) / CLOCKS_PER_SEC;
    printf("\nTime elapsed: %f ", time_spent);
    return 0;
}
- //Write a simple OpenMP program to employ a ‘reduction’ clause to express the reduction of a for loop
- #include <stdio.h>
- #include <stdlib.h>
- #include <omp.h>
/*
 * Sums an 8-element array with an OpenMP 'reduction' clause, as the
 * assignment asks.  Fixes two defects in the original: 'void main' is not
 * a valid signature, and private(lsum) copies are UNINITIALIZED inside the
 * parallel region, so the manual critical-section merge summed garbage.
 * reduction(+:sum) zero-initializes each thread's copy and combines them.
 */
int main()
{
    int sum = 0;
    int A[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    int i;
    #pragma omp parallel for reduction(+:sum)
    for (i = 0; i < 8; i++)
    {
        sum += A[i];
    }
    printf("sum is:%d\n", sum);
    return 0;
}
- //Write an OpenMP program to specify that the schedule(dynamic, chunk-size) clause of the loop construct specifies that the for loop has the dynamic scheduling type.
- #include <omp.h>
- #include <stdio.h>
- #include <stdlib.h>
- #define CHUNKSIZE 10
- #define N 100
- int main (int argc, char *argv[])
- {
- int nthreads, tid, i, chunk;
- float a[N], b[N], c[N];
- for (i=0; i < N; i++)
- a[i] = b[i] = i * 1.0;
- chunk = CHUNKSIZE;
- #pragma omp parallel shared(a,b,c,nthreads,chunk) private(i,tid)
- {
- tid = omp_get_thread_num();
- if (tid == 0)
- {
- nthreads = omp_get_num_threads();
- printf("Number of threads = %d\n", nthreads);
- }
- printf("Thread %d starting...\n",tid);
- #pragma omp for schedule(dynamic,chunk)
- for (i=0; i<N; i++)
- {
- c[i] = a[i] + b[i];
- printf("Thread %d: c[%d]= %f\n",tid,i,c[i]);
- }
- }
- }
- // MPI Program to perform binary search. //
- #include<stdio.h>
- #include<time.h>
- #include<mpi.h>
- int main(int argc,char *argv[])
- {
- clock_t tic=clock();
- int rank, size;
- int a[10]={1,2,3,4,5,6,7,8,9,10};
- int b[10];
- int search=6,flag=0;
- int i;
- MPI_Init(&argc,&argv);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- MPI_Scatter(&a,5,MPI_INT,&b,5,MPI_INT,0,MPI_COMM_WORLD);
- if (rank==0)
- {
- for(i=0;i<5;i++)
- {
- if(b[i]==search)
- {
- printf("\nNumber Found!\t\t%d\t\t%d",rank,i);
- flag=1;
- }
- printf("\n%d\t\t%d",b[i],rank);
- }
- }
- if(rank==1)
- {
- for( i=0;i<5;i++)
- {
- if(b[i]==search)
- {
- printf("\nNumber Found!\t\t%d\t\t%d",rank,i);
- flag=1;
- }
- printf("\n%d\t\t%d",b[i],rank);
- }
- }
- //Program to find biggest and smallest of three numbers in two different cores//
- #include<stdio.h>
- #include<omp.h>
- #include<time.h>
/*
 * Two threads (cores) report in parallel: thread 0 prints the biggest of
 * a, b, c and thread 1 the smallest.
 */
int main()
{
    int a = 50;
    int b = 100;
    int c = 150;
    clock_t start_clock = clock();
    #pragma omp parallel
    {
        int id = omp_get_thread_num();
        if (id == 0)
        {
            if (a > b && a > c)
                printf("Biggest number is %d\n", a);
            else if (b > a && b > c)
                printf("Biggest number is %d\n", b);
            else
                printf("Biggest number is %d\n", c);
        }
        else if (id == 1)  /* fixed: was a bare 'else', so with more than two
                            * threads every extra thread repeated this output */
        {
            if (a < b && a < c)
                printf("Smallest number is %d\n", a);
            else if (b < a && b < c)
                printf("Smallest number is %d\n", b);
            else
                printf("Smallest number is %d\n", c);
        }
    }
    clock_t end_clock = clock();
    /* fixed: clock() returns ticks, not ms; convert via CLOCKS_PER_SEC */
    printf("\nProgram Execution Time : %ld ms\n\n",
           (long)((end_clock - start_clock) * 1000 / CLOCKS_PER_SEC));
    return 0;
}
- //Program to perform matrix multiplication(omp prog)//
- #include<stdio.h>
- #include<omp.h>
- #include<time.h>
/*
 * 2x2 matrix multiplication with the outer loop parallelized by OpenMP.
 */
int main()
{
    int i = 0, j = 0, k = 0;
    int a[2][2] = {{1, 2}, {3, 4}};
    int b[2][2] = {{1, 2}, {3, 4}};
    int c[2][2] = {{0, 0}, {0, 0}};
    clock_t start_clock = clock();
    /* fixed data race: j and k are declared outside the parallel region and
     * were therefore SHARED between threads; private(j,k) gives each thread
     * its own inner-loop counters (the parallel-for variable i is already
     * private by the OpenMP rules) */
    #pragma omp parallel for private(j, k)
    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            for (k = 0; k < 2; k++)
            {
                c[i][j] = c[i][j] + a[i][k] * b[k][j];
            }
        }
    }
    clock_t end_clock = clock();
    printf("\nProgram Execution Time : %ld ms\n\n", (end_clock - start_clock));
    printf("Resultant Matrix\n");
    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            printf("C[%d][%d]=%d\n", i, j, c[i][j]);
        }
    }
    return 0;
}
- //MPI program to find sum of an array//
- #include "mpi.h"
- #include <stdio.h>
- #include <stdlib.h>
- #define ARRAYSIZE 20000000
- #define MASTER 0 double data[ARRAYSIZE];
- int main (int argc, char *argv[])
- {
- int numtasks, taskid, rc, dest, offset, i, j, tag1, tag2, source, chunksize, leftover;
- double mysum, sum;
- double update(int myoffset, int chunk, int myid); MPI_Status status;
- /***** Initializations *****/
- MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
- MPI_Comm_rank(MPI_COMM_WORLD,&taskid); printf ("MPI task %d has started... ", taskid); chunksize = (ARRAYSIZE / numtasks); leftover = (ARRAYSIZE % numtasks); tag2 = 1; tag1 = 2;
- /***** Master task only ******/ if (taskid == MASTER){
- /* Initialize the array */
- sum = 0;
- for(i=0; i<ARRAYSIZE; i++) { data[i] = i * 1.0; sum = sum + data[i];
- }
- printf("Initialized array sum = %e\n",sum); printf("numtasks= %d chunksize= %d leftover= %d\ n",numtasks,chunksize,leftover);
- /* Send each task its portion of the array - master keeps 1st part plus
- leftover elements */ offset = chunksize + leftover; for (dest=1; dest<numtasks; dest++) {
- MPI_Send(&offset, 1, MPI_INT, dest, tag1, MPI_COMM_WORLD);
- MPI_Send(&data[offset], chunksize, MPI_DOUBLE, dest, tag2, MPI_COMM_WORLD); printf("Sent %d elements to task %d offset= %d\n",chunksize,dest,offset);
- offset = offset + chunksize; }
- /* Master does its part of the work */
- offset = 0;
- mysum = update(offset, chunksize+leftover, taskid);
- /* Wait to receive results from each task */
- for (i=1; i<numtasks; i++) { source = i;
- MPI_Recv(&offset, 1, MPI_INT, source, tag1, MPI_COMM_WORLD, &status);
- MPI_Recv(&data[offset], chunksize, MPI_DOUBLE, source, tag2,
- MPI_COMM_WORLD, &status);
- }
- /* Get final sum and print sample results */
- MPI_Reduce(&mysum, &sum, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
- printf("Sample results: \n");
- offset = 0;
- for (i=0; i<numtasks; i++) { for (j=0; j<5; j++) printf(" %e",data[offset+j]);
- printf("\n");
- offset = offset + chunksize;
- }
- printf("*** Final sum= %e ***\n",sum); } /* end of master section */
- /***** Non-master tasks only *****/ if (taskid > MASTER) {
- /* Receive my portion of array from the master task */
- source = MASTER;
- MPI_Recv(&offset, 1, MPI_INT, source, tag1, MPI_COMM_WORLD, &status);
- MPI_Recv(&data[offset], chunksize, MPI_DOUBLE, source, tag2,
- MPI_COMM_WORLD, &status);
- /* Do my part of the work */
- mysum = update(offset, chunksize, taskid);
- /* Send my results back to the master task */ dest = MASTER;
- MPI_Send(&offset, 1, MPI_INT, dest, tag1, MPI_COMM_WORLD);
- MPI_Send(&data[offset], chunksize, MPI_DOUBLE, MASTER, tag2, MPI_COMM_WORLD);
- /* Use sum reduction operation to obtain final sum */
- MPI_Reduce(&mysum, &sum, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD); } /* end of non-master */
- MPI_Finalize(); } /* end of main */
- double update(int myoffset, int chunk, int myid) { int i; double mysum;
- /* Perform addition to each of my array elements and keep my sum */
- mysum = 0;
- for(i=myoffset; i < myoffset + chunk; i++) {
- data[i] = data[i] + (i * 1.0); mysum = mysum + data[i];
- }
- printf("Task %d mysum = %e\n",myid,mysum);
- return(mysum);
- }
- //MPI program for matrix multiplication//
- #include "mpi.h"
- #include <stdio.h>
- #include <stdlib.h>
- #define NRA 62 /* number of rows in matrix A */
- #define NCA 15 /* number of columns in matrix A */
- #define NCB 7 /* number of columns in matrix B */
- #define MASTER 0 /* taskid of first task */
- #define FROM_MASTER 1 /* setting a message type */ #define FROM_WORKER 2 /* setting a message type */
- int main (int argc, char *argv[])
- {
- int numtasks, /* number of tasks in partition */
- taskid, /* a task identifier */ numworkers, /* number of worker tasks */ source, /* task id of message source */ dest, /* task id of message destination */ mtype, /* message type */
- rows, /* rows of matrix A sent to each worker */ averow, extra, offset, /* used to determine rows sent to each worker */ i, j, k, rc; /* misc */
- double a[NRA][NCA], /* matrix A to be multiplied */ b[NCA][NCB], /* matrix B to be multiplied */ c[NRA][NCB]; /* result matrix C */ MPI_Status status;
- MPI_Init(&argc,&argv);
- MPI_Comm_rank(MPI_COMM_WORLD,&taskid); MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
- if (numtasks < 2 ) {
- printf("Need at least two MPI tasks. Quitting...\n");
- MPI_Abort(MPI_COMM_WORLD, rc); exit(1);
- }
- numworkers = numtasks-1;
- /**************************** master task ************************************/ if (taskid == MASTER)
- {
- printf("mpi_mm has started with %d tasks.\n",numtasks); printf("Initializing arrays...\n"); for (i=0; i<NRA; i++) for (j=0; j<NCA; j++) a[i][j]= i+j;
- for (i=0; i<NCA; i++) for (j=0; j<NCB; j++) b[i][j]= i*j;
- /* Send matrix data to the worker tasks */ averow = NRA/numworkers; extra = NRA%numworkers;
- offset = 0;
- mtype = FROM_MASTER;
- for (dest=1; dest<=numworkers; dest++)
- {
- rows = (dest <= extra) ? averow+1 : averow;
- printf("Sending %d rows to task %d offset=%d\n",rows,dest,offset);
- MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
- MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
- MPI_Send(&a[offset][0], rows*NCA, MPI_DOUBLE, dest, mtype,
- MPI_COMM_WORLD);
- MPI_Send(&b, NCA*NCB, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD); offset = offset + rows; }
- /* Receive results from worker tasks */ mtype = FROM_WORKER;
- for (i=1; i<=numworkers; i++)
- { source = i;
- MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
- MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
- MPI_Recv(&c[offset][0], rows*NCB, MPI_DOUBLE, source, mtype, MPI_COMM_WORLD, &status); printf("Received results from task %d\n",source);
- }
- /* Print results */
- printf("******************************************************\n"); printf("Result Matrix:\n"); for (i=0; i<NRA; i++)
- {
- printf("\n"); for (j=0; j<NCB; j++) printf("%6.2f ", c[i][j]);
- }
- printf("\n******************************************************\n"); printf ("Done.\n");
- }
- /**************************** worker task ************************************/ if (taskid > MASTER)
- {
- mtype = FROM_MASTER;
- MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
- MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
- MPI_Recv(&a, rows*NCA, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD,
- &status);
- MPI_Recv(&b, NCA*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD,
- &status);
- for (k=0; k<NCB; k++) for (i=0; i<rows; i++)
- {
- c[i][k] = 0.0; for (j=0; j<NCA; j++)
- c[i][k] = c[i][k] + a[i][j] * b[j][k];
- }
- mtype = FROM_WORKER;
- MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
- MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
- MPI_Send(&c, rows*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD);
- }
- MPI_Finalize(); }
- //. MPI program to find integration of function sin(X) over the range 0 to 2 pi MPI_Bcast to send information to each participating process and MPI_Reduce to get a grand total of the areas computed by each participating process.//
- #include <stdio.h>
- #include <math.h>
- #include <mpi.h> #define PI 3.1415926535
- main(int argc, char **argv)
- {
- int my_id, root_process, num_procs, ierr, num_intervals, i; double rect_width, area, sum, x_middle, partial_sum; MPI_Status status;
- /* Let process 0 be the root process. */
- root_process = 0; /* Now replicate this process to create parallel processes. */ ierr = MPI_Init(&argc, &argv); /* Find out MY process ID, and how many processes were started. */
- ierr = MPI_Comm_rank(MPI_COMM_WORLD, &my_id); ierr = MPI_Comm_size(MPI_COMM_WORLD, &num_procs); if(my_id == root_process) {
- /* I must be the root process, so I will query the user to determine how many interpolation intervals to use. */
- printf("Please enter the number of intervals to interpolate: "); scanf("%i", &num_intervals); }
- /* Then...no matter which process I am:
- *
- * I engage in a broadcast so that the number of intervals is * sent from the root process to the other processes, and ...
- **/
- ierr = MPI_Bcast(&num_intervals, 1, MPI_INT, root_process, MPI_COMM_WORLD); /* calculate the width of a rectangle, and */ rect_width = PI / num_intervals;
- /* then calculate the sum of the areas of the rectangles for
- * which I am responsible. Start with the (my_id +1)th * interval and process every num_procs-th interval thereafter.
- **/ partial_sum = 0;
- for(i = my_id + 1; i <num_intervals + 1; i += num_procs) {
- /* Find the middle of the interval on the X-axis. */
- x_middle = (i - 0.5) * rect_width; area = sin(x_middle) * rect_width; partial_sum = partial_sum + area;
- }
- printf("proc %i computes: %f\n", my_id, (float)partial_sum);
- /* and finally, engage in a reduction in which all partial sums
- * are combined, and the grand sum appears in variable "sum" in
- * the root process,
- **/
- ierr = MPI_Reduce(&partial_sum, &sum, 1, MPI_DOUBLE, MPI_SUM, root_process, MPI_COMM_WORLD); /* and, if I am the root process, print the result. */
- if(my_id == root_process) {
- printf("The integral is %f\n", (float)sum);
- /* (yes, we could have summed just the heights, and
- * postponed the multiplication by rect_width til now.) */
- } /* Close down this processes. */
- ierr = MPI_Finalize(); }