Program 1:
Introduction to parallel programming using OpenMP — Write an OpenMP program to find
the largest of three numbers and the smallest of three numbers using explicit thread
identification.
#include <stdio.h>
#include <omp.h>
/*
 * Reads three integers and, inside a two-thread parallel region, uses
 * explicit thread identification to split the work: thread 0 finds the
 * largest value and thread 1 finds the smallest. Execution time of the
 * parallel region is measured with omp_get_wtime().
 */
int main() {
    int a, b, c;
    /* Initialized so the final printfs are well-defined even if the
     * region runs with fewer than 2 threads. */
    int largest = 0, smallest = 0;

    printf("Enter three numbers: ");
    if (scanf("%d %d %d", &a, &b, &c) != 3) {
        /* Reject malformed input instead of comparing garbage values. */
        fprintf(stderr, "Invalid input: expected three integers.\n");
        return 1;
    }

    /* Record start time. */
    double start_time = omp_get_wtime();

    /*
     * Braces are required: without them only the first statement would
     * form the parallel region (and a bare declaration there is not a
     * valid structured block). Each thread picks its task from its id.
     */
    #pragma omp parallel num_threads(2)
    {
        int tid = omp_get_thread_num();

        if (tid == 0) {
            /* Thread 0 computes the largest. */
            if (a >= b && a >= c)
                largest = a;
            else if (b >= a && b >= c)
                largest = b;
            else
                /* Neither a nor b is the maximum, so c must be. */
                largest = c;
            printf("Thread %d found the largest: %d\n", tid, largest);
        }

        if (tid == 1) {
            /* Thread 1 computes the smallest. */
            if (a <= b && a <= c)
                smallest = a;
            else if (b <= a && b <= c)
                smallest = b;
            else
                /* Neither a nor b is the minimum, so c must be. */
                smallest = c;
            printf("Thread %d found the smallest: %d\n", tid, smallest);
        }
    }   /* Implicit barrier: both results are ready past this point. */

    /* Record end time (after the region, on the main thread only). */
    double end_time = omp_get_wtime();
    double exec_time = end_time - start_time;

    /* Final output from the main thread. */
    printf("\nFinal Result:\n");
    printf("Largest: %d\n", largest);
    printf("Smallest: %d\n", smallest);
    printf("Execution Time: %f seconds\n", exec_time);
    return 0;
}
Program 2: OpenMP work-sharing constructs — Write a program to perform matrix
operations and compare the sequential and parallel execution times.
#include <stdio.h>
#include <omp.h>
// Giving matrix size = 500*500
#define SIZE 500 // Adjust size as needed for your system
/*
 * Fills two SIZE x SIZE matrices, adds them sequentially and then in
 * parallel (omp parallel for), reports both execution times, and checks
 * that the two result matrices agree.
 */
int main() {
    /* 'static' puts the ~4 MB of matrix data in the data segment; four
     * 500x500 int arrays as automatics would likely overflow the stack. */
    static int A[SIZE][SIZE], B[SIZE][SIZE];
    static int C_seq[SIZE][SIZE], C_par[SIZE][SIZE];
    int i, j;
    double start, end;

    /* Initialize A[i][j] = i + j and B[i][j] = i - j, so that every
     * element of A + B equals 2*i (row i is constant). */
    for (i = 0; i < SIZE; i++) {
        for (j = 0; j < SIZE; j++) {
            A[i][j] = i + j;
            B[i][j] = i - j;
        }
    }

    /* Sequential matrix addition. */
    start = omp_get_wtime();
    for (i = 0; i < SIZE; i++) {
        for (j = 0; j < SIZE; j++) {
            C_seq[i][j] = A[i][j] + B[i][j];
        }
    }
    end = omp_get_wtime();
    double seq_time = end - start;
    printf("Sequential Execution Time: %f seconds\n", seq_time);

    /* Parallel matrix addition: rows are divided among the threads;
     * j is private so each thread keeps its own inner-loop counter. */
    start = omp_get_wtime();
    #pragma omp parallel for private(j)
    for (i = 0; i < SIZE; i++) {
        for (j = 0; j < SIZE; j++) {
            C_par[i][j] = A[i][j] + B[i][j];
        }
    }
    end = omp_get_wtime();
    double par_time = end - start;
    printf("Parallel Execution Time: %f seconds\n", par_time);

    /* Verify the parallel result against the sequential one. The outer
     * loop also tests 'correct' so both loops stop at the first mismatch
     * (a bare 'break' would only leave the inner loop). */
    int correct = 1;  /* used as a boolean: 1 = matrices match */
    for (i = 0; i < SIZE && correct; i++) {
        for (j = 0; j < SIZE; j++) {
            if (C_seq[i][j] != C_par[i][j]) {
                correct = 0;
                break;
            }
        }
    }

    if (correct)
        printf("Matrix addition is correct.\n");
    else
        printf("Matrix addition is incorrect!\n");
    return 0;
}
Program 3: OpenMP synchronization constructs — Demonstrate an example using the
single, master, barrier, and critical constructs of OpenMP.
#include <stdio.h>
#include <omp.h>
/*
 * Demonstrates the single, master, barrier, and critical synchronization
 * constructs inside a four-thread parallel region. Each thread adds its
 * id to a shared sum under a critical section, so the final sum is
 * 0 + 1 + 2 + 3 = 6.
 */
int main() {
    int sum = 0;

    /* Braces are required so the whole block — not just the first
     * statement — forms the parallel region. */
    #pragma omp parallel num_threads(4)
    {
        int tid = omp_get_thread_num();

        /* SINGLE: exactly one (arbitrary) thread executes this block;
         * 'single' carries an implicit barrier at its end. */
        #pragma omp single
        printf("Thread %d is executing SINGLE section (e.g., reading input)\n", tid);

        /* BARRIER: all threads wait here before continuing. */
        #pragma omp barrier

        /* MASTER: only the master thread (id 0) executes this block;
         * unlike 'single', 'master' has no implicit barrier. */
        #pragma omp master
        printf("MASTER thread (%d) is executing this block\n", tid);

        /* CRITICAL: serialize the read-modify-write of the shared sum.
         * The printf is inside the same critical block so the value it
         * reports is the one this thread just wrote, with no race. */
        #pragma omp critical
        {
            sum += tid;
            printf("Thread %d updated sum to %d (inside CRITICAL section)\n", tid, sum);
        }

        /* All threads synchronize again before announcing completion. */
        #pragma omp barrier
        printf("Thread %d completed execution\n", tid);
    }

    /* Final output from the main thread, after the region's implicit
     * barrier guarantees every update to sum is visible. */
    printf("Final sum is: %d\n", sum);
    return 0;
}