Parallel Computing Lab Manual

Program 1

Write an OpenMP program to sort an array of n elements using both sequential and
parallel merge sort (using sections). Record the difference in execution time.
#include <iostream>
#include <omp.h>
#include <vector>
#include <cstdlib>
#include <ctime>
#include <algorithm>

using namespace std;

// Merge function for mergesort
void merge(vector<int> &arr, int left, int mid, int right) {
int n1 = mid - left + 1;
int n2 = right - mid;

vector<int> L(n1), R(n2);


for (int i = 0; i < n1; ++i)
L[i] = arr[left + i];
for (int j = 0; j < n2; ++j)
R[j] = arr[mid + 1 + j];

int i = 0, j = 0, k = left;

while (i < n1 && j < n2)
arr[k++] = (L[i] <= R[j]) ? L[i++] : R[j++];

while (i < n1) arr[k++] = L[i++];
while (j < n2) arr[k++] = R[j++];
}

// Sequential mergesort
void sequentialMergeSort(vector<int> &arr, int left, int right) {
if (left < right) {
int mid = (left + right) / 2;
sequentialMergeSort(arr, left, mid);
sequentialMergeSort(arr, mid + 1, right);
merge(arr, left, mid, right);
}
}

// Parallel mergesort using OpenMP sections
void parallelMergeSort(vector<int> &arr, int left, int right) {
if (left < right) {
int mid = (left + right) / 2;

#pragma omp parallel sections
{
#pragma omp section
parallelMergeSort(arr, left, mid);

#pragma omp section
parallelMergeSort(arr, mid + 1, right);
}

merge(arr, left, mid, right);
}
}
int main() {
int n;
cout << "Enter number of elements in the array: ";
cin >> n;

vector<int> original(n);
srand(time(0));
for (int i = 0; i < n; ++i)
original[i] = rand() % 10000;

vector<int> arr_seq = original;
vector<int> arr_par = original;

// Sequential MergeSort
double start_seq = omp_get_wtime();
sequentialMergeSort(arr_seq, 0, n - 1);
double end_seq = omp_get_wtime();

// Parallel MergeSort
double start_par = omp_get_wtime();
parallelMergeSort(arr_par, 0, n - 1);
double end_par = omp_get_wtime();

cout << "\nExecution Time (Sequential MergeSort): " << (end_seq - start_seq) << " seconds";
cout << "\nExecution Time (Parallel MergeSort using OpenMP sections): " << (end_par - start_par) << " seconds";

// Optional: validate correctness
if (arr_seq == arr_par)
cout << "\n\n Both sorted arrays are identical." << endl;
else
cout << "\n\n Sorted arrays do not match!" << endl;

return 0;
}
Output:
g++ -fopenmp mergesort_omp.cpp -o mergesort
./mergesort
Enter number of elements in the array: 100000
Execution Time (Sequential MergeSort): 0.04375 seconds
Execution Time (Parallel MergeSort using OpenMP sections): 0.01932 seconds
Both sorted arrays are identical.
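Note: the recursive "parallel sections" above open a new parallel region at every level of recursion; most OpenMP runtimes serialize nested regions unless nesting is enabled, so the speedup comes mainly from the first split. A minimal alternative sketch (an addition, not part of the original program) uses tasks with an assumed cutoff and reuses merge() and sequentialMergeSort() from the listing above:

// Sketch: task-based parallel merge sort with a cutoff (1000 is an assumed, tunable value)
void taskMergeSort(vector<int> &arr, int left, int right) {
    if (left < right) {
        int mid = left + (right - left) / 2;
        if (right - left < 1000) {
            // Small subarray: sort sequentially to avoid task overhead
            sequentialMergeSort(arr, left, mid);
            sequentialMergeSort(arr, mid + 1, right);
        } else {
            #pragma omp task shared(arr)
            taskMergeSort(arr, left, mid);
            #pragma omp task shared(arr)
            taskMergeSort(arr, mid + 1, right);
            #pragma omp taskwait   // both halves must finish before merging
        }
        merge(arr, left, mid, right);
    }
}

// Launched from a single thread inside a parallel region, e.g. in main():
// #pragma omp parallel
// #pragma omp single
// taskMergeSort(arr_par, 0, n - 1);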
Program 2
Write an OpenMP program that divides the iterations into chunks of 2 iterations each
(OMP_SCHEDULE=static,2). Its input should be the number of iterations, and its output
should be which iterations of a parallelized for loop are executed by which thread. For
example, with two threads and four iterations, the output might be:
a. Thread 0 : Iterations 0 -- 1
b. Thread 1 : Iterations 2 -- 3

#include <iostream>
#include <omp.h>
#include <vector>

using namespace std;

int main() {
int n;
cout << "Enter number of iterations: ";
cin >> n;

// Set the schedule to static with chunk size 2
omp_set_schedule(omp_sched_static, 2);

// Optional: fix number of threads for consistency
// omp_set_num_threads(2);

vector<vector<int>> thread_iterations(omp_get_max_threads());

#pragma omp parallel
{
int tid = omp_get_thread_num();
#pragma omp for schedule(runtime)
for (int i = 0; i < n; i++) {
thread_iterations[tid].push_back(i);
}
}

// Output result
for (int i = 0; i < thread_iterations.size(); ++i) {
if (!thread_iterations[i].empty()) {
cout << "Thread " << i << " : Iterations ";
for (size_t j = 0; j < thread_iterations[i].size(); ++j) {
cout << thread_iterations[i][j];
if (j != thread_iterations[i].size() - 1)
cout << " -- ";
}
cout << endl;
}
}

return 0;
}

Output:
g++ -fopenmp omp_static_chunk.cpp -o omp_static_chunk
export OMP_NUM_THREADS=2
./omp_static_chunk
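A representative run (not captured in the manual; assuming input 4 with OMP_NUM_THREADS=2, where static,2 assigns chunk {0,1} to thread 0 and {2,3} to thread 1):
Enter number of iterations: 4
Thread 0 : Iterations 0 -- 1
Thread 1 : Iterations 2 -- 3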
Program 3
Write an OpenMP program to calculate the first n Fibonacci numbers using tasks.
#include <iostream>
#include <omp.h>
#include <vector>

using namespace std;

// Recursive Fibonacci with OpenMP tasks
int fib(int n) {
if (n < 2)
return n;

int x, y;

#pragma omp task shared(x)
x = fib(n - 1);

#pragma omp task shared(y)
y = fib(n - 2);

#pragma omp taskwait
return x + y;
}

int main() {
int n;
cout << "Enter the number of Fibonacci numbers to generate: ";
cin >> n;
vector<int> fib_series(n);

double start_time = omp_get_wtime();

#pragma omp parallel
{
#pragma omp single
{
for (int i = 0; i < n; ++i) {
#pragma omp task firstprivate(i)
fib_series[i] = fib(i);
}
}
}

double end_time = omp_get_wtime();

cout << "\nFibonacci Series up to " << n << " terms:\n";
for (int i = 0; i < n; ++i)
cout << fib_series[i] << " ";

cout << "\n\nExecution Time: " << (end_time - start_time) << " seconds\n";

return 0;
}
Output:
g++ -fopenmp fib_tasks.cpp -o fib_tasks
./fib_tasks
Enter the number of Fibonacci numbers to generate: 10
Fibonacci Series up to 10 terms:
0 1 1 2 3 5 8 13 21 34

Execution Time: 0.0003 seconds
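Note: spawning one task per recursive call makes the task count grow exponentially with n. A hedged sketch (not part of the original program) that switches to plain recursion below an assumed cutoff of 20:

// Sketch: same task structure as fib(), but small subproblems run sequentially
int fibCutoff(int n) {
    if (n < 2) return n;
    if (n < 20)                        // assumed cutoff; below it, no tasks are created
        return fibCutoff(n - 1) + fibCutoff(n - 2);
    int x, y;
    #pragma omp task shared(x)
    x = fibCutoff(n - 1);
    #pragma omp task shared(y)
    y = fibCutoff(n - 2);
    #pragma omp taskwait
    return x + y;
}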


Program 4
Write an OpenMP program to find the prime numbers from 1 to n using the
parallel for directive. Record both serial and parallel execution times.
#include <iostream>
#include <omp.h>
#include <vector>
#include <cmath>

using namespace std;

// Function to check if a number is prime
bool isPrime(int num) {
if (num <= 1) return false;
if (num == 2) return true;
if (num % 2 == 0) return false;

int sqrt_n = sqrt(num);

for (int i = 3; i <= sqrt_n; i += 2) {
if (num % i == 0)
return false;
}
return true;
}

int main() {
int n;
cout << "Enter the value of n: ";
cin >> n;

vector<int> primes_serial;
vector<int> primes_parallel(n + 1, 0); // 0: not prime, 1: prime

// Serial Execution
double start_serial = omp_get_wtime();
for (int i = 2; i <= n; ++i) {
if (isPrime(i))
primes_serial.push_back(i);
}
double end_serial = omp_get_wtime();

// Parallel Execution
double start_parallel = omp_get_wtime();
#pragma omp parallel for
for (int i = 2; i <= n; ++i) {
if (isPrime(i)) {
primes_parallel[i] = 1;
}
}
double end_parallel = omp_get_wtime();

// Output results
cout << "\nPrimes (Serial):\n";
for (int p : primes_serial)
cout << p << " ";

cout << "\n\nPrimes (Parallel):\n";
for (int i = 2; i <= n; ++i) {
if (primes_parallel[i] == 1)
cout << i << " ";
}
cout << "\n\nExecution Time (Serial): " << (end_serial - start_serial) << " seconds";
cout << "\nExecution Time (Parallel): " << (end_parallel - start_parallel) << " seconds\n";

return 0;
}

Output:
g++ -fopenmp find_primes.cpp -o find_primes
./find_primes
Enter the value of n: 20

Primes (Serial):
2 3 5 7 11 13 17 19

Primes (Parallel):
2 3 5 7 11 13 17 19

Execution Time (Serial): 2.5e-05 seconds
Execution Time (Parallel): 1.6e-05 seconds
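Note: isPrime(i) gets more expensive as i grows, so the default static split can leave the last threads with most of the work. A hedged variant (an assumption, not part of the recorded run) uses a dynamic schedule for better load balance:

// Sketch: dynamic schedule; the chunk size 1000 is an assumed tuning value
#pragma omp parallel for schedule(dynamic, 1000)
for (int i = 2; i <= n; ++i) {
    if (isPrime(i))
        primes_parallel[i] = 1;
}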
Program 5
Write an MPI program to demonstrate MPI_Send and MPI_Recv.
#include <mpi.h>
#include <iostream>

using namespace std;

int main(int argc, char* argv[]) {
int rank, size;
MPI_Init(&argc, &argv); // Initialize MPI environment
MPI_Comm_rank(MPI_COMM_WORLD, &rank); // Get process rank
MPI_Comm_size(MPI_COMM_WORLD, &size); // Get number of processes

if (size < 2) {
if (rank == 0)
cout << "This program requires at least 2 processes.\n";
MPI_Finalize();
return 0;
}

int data;

if (rank == 0) {
data = 42;
cout << "Process 0 sending data: " << data << " to Process 1\n";
MPI_Send(&data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
} else if (rank == 1) {
MPI_Recv(&data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
cout << "Process 1 received data: " << data << " from Process 0\n";
}
MPI_Finalize(); // Clean up the MPI environment
return 0;
}
Output:
mpic++ mpi_send_recv.cpp -o mpi_send_recv
mpirun -np 2 ./mpi_send_recv
Process 0 sending data: 42 to Process 1
Process 1 received data: 42 from Process 0
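If the receiver needs the message envelope (actual source, tag, or element count), it can pass an MPI_Status instead of MPI_STATUS_IGNORE. A short sketch (an addition to the program above):

// Sketch: wildcard receive, then query the envelope from the status object
MPI_Status status;
int count;
MPI_Recv(&data, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Get_count(&status, MPI_INT, &count);
cout << "Received " << count << " int(s) from rank " << status.MPI_SOURCE
     << " with tag " << status.MPI_TAG << "\n";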
Program 6
Write an MPI program to demonstrate deadlock using point-to-point communication
and the avoidance of deadlock by altering the call sequence.
Deadlock Demonstration Program
#include <mpi.h>
#include <iostream>

using namespace std;

int main(int argc, char *argv[]) {
int rank, size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);

if (size != 2) {
if (rank == 0)
cout << "Please run with exactly 2 processes.\n";
MPI_Finalize();
return 0;
}

int send_data = rank + 100;
int recv_data;

if (rank == 0) {
MPI_Send(&send_data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
MPI_Recv(&recv_data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
} else if (rank == 1) {
MPI_Send(&send_data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Recv(&recv_data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
}

cout << "Process " << rank << " received: " << recv_data << endl;

MPI_Finalize();
return 0;
}
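Note (beyond the manual text): because the message here is a single int, many MPI implementations buffer MPI_Send eagerly, and the program above may still finish. Replacing MPI_Send with the synchronous MPI_Ssend makes the deadlock deterministic, as in this sketch:

// Sketch: MPI_Ssend returns only after the matching receive has started,
// so both ranks block in the send and neither reaches its MPI_Recv.
if (rank == 0) {
    MPI_Ssend(&send_data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
    MPI_Recv(&recv_data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
} else if (rank == 1) {
    MPI_Ssend(&send_data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
    MPI_Recv(&recv_data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}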

Deadlock Avoidance Using MPI_Sendrecv
#include <mpi.h>
#include <iostream>

using namespace std;

int main(int argc, char *argv[]) {
int rank, size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);

if (size != 2) {
if (rank == 0)
cout << "Please run with exactly 2 processes.\n";
MPI_Finalize();
return 0;
}

int send_data = rank + 100;
int recv_data;

// Simultaneously send and receive
MPI_Sendrecv(&send_data, 1, MPI_INT, 1 - rank, 0,
&recv_data, 1, MPI_INT, 1 - rank, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);

cout << "Process " << rank << " received: " << recv_data << endl;

MPI_Finalize();
return 0;
}
Deadlock Avoidance by Rank-Based Call Sequence
#include <mpi.h>
#include <iostream>

using namespace std;

int main(int argc, char *argv[]) {
int rank, size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);

if (size != 2) {
if (rank == 0)
cout << "Please run with exactly 2 processes.\n";
MPI_Finalize();
return 0;
}
int send_data = rank + 100;
int recv_data;

if (rank == 0) {
MPI_Send(&send_data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
MPI_Recv(&recv_data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
} else {
MPI_Recv(&recv_data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
MPI_Send(&send_data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
}

cout << "Process " << rank << " received: " << recv_data << endl;

MPI_Finalize();
return 0;
}
Output:
mpic++ deadlock_demo.cpp -o deadlock_demo
mpic++ no_deadlock_sendrecv.cpp -o no_deadlock_sendrecv
mpic++ no_deadlock_sequence.cpp -o no_deadlock_sequence
mpirun -np 2 ./deadlock_demo          # may hang: both ranks call MPI_Send first; with a 1-int message many implementations buffer eagerly, so the hang is guaranteed only for synchronous or large sends
mpirun -np 2 ./no_deadlock_sendrecv
mpirun -np 2 ./no_deadlock_sequence
Program 7
Write an MPI program to demonstrate the broadcast operation (MPI_Bcast).
#include <mpi.h>
#include <iostream>

using namespace std;

int main(int argc, char *argv[]) {
int rank, size;
int data;

MPI_Init(&argc, &argv); // Initialize MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank); // Get current process rank
MPI_Comm_size(MPI_COMM_WORLD, &size); // Get total number of processes

if (rank == 0) {
// Root process initializes the data
cout << "Enter a number to broadcast: ";
cin >> data;
}

// Broadcast the data from root (process 0) to all processes
MPI_Bcast(&data, 1, MPI_INT, 0, MPI_COMM_WORLD);

// Every process prints the received data
cout << "Process " << rank << " received data: " << data << endl;

MPI_Finalize(); // Finalize MPI

return 0;
}
Output:
mpic++ mpi_broadcast.cpp -o mpi_broadcast
mpirun -np 4 ./mpi_broadcast
Enter a number to broadcast: 99
Process 0 received data: 99
Process 1 received data: 99
Process 2 received data: 99
Process 3 received data: 99
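MPI_Bcast is not limited to a single scalar; every rank must call it with the same count, datatype, and root. A small sketch (an addition to the program above) broadcasting an array:

// Sketch: the root's buffer is sent; the other ranks' buffers are overwritten
int values[3];
if (rank == 0) { values[0] = 1; values[1] = 2; values[2] = 3; }
MPI_Bcast(values, 3, MPI_INT, 0, MPI_COMM_WORLD);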
Program 8
Write an MPI program to demonstrate MPI_Scatter and MPI_Gather.
#include <mpi.h>
#include <iostream>
#include <vector>

using namespace std;

int main(int argc, char* argv[]) {
int rank, size;

MPI_Init(&argc, &argv); // Initialize MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank); // Get current process rank
MPI_Comm_size(MPI_COMM_WORLD, &size); // Get total number of processes

const int total_elements = size * 2; // Let's send 2 elements to each process
vector<int> send_buffer;

if (rank == 0) {
// Root process initializes data
send_buffer.resize(total_elements);
cout << "Root process initialized data:\n";
for (int i = 0; i < total_elements; ++i) {
send_buffer[i] = i + 1;
cout << send_buffer[i] << " ";
}
cout << endl;
}

// Each process will receive 2 elements
vector<int> recv_buffer(2);

// Scatter 2 integers to each process
MPI_Scatter(send_buffer.data(), 2, MPI_INT,
recv_buffer.data(), 2, MPI_INT,
0, MPI_COMM_WORLD);

cout << "Process " << rank << " received: ";
for (int val : recv_buffer)
cout << val << " ";
cout << endl;

// Each process modifies the data
for (int &val : recv_buffer)
val += rank * 10;

// Gather back the modified data
vector<int> gathered_buffer;
if (rank == 0)
gathered_buffer.resize(total_elements);

MPI_Gather(recv_buffer.data(), 2, MPI_INT,
gathered_buffer.data(), 2, MPI_INT,
0, MPI_COMM_WORLD);

if (rank == 0) {
cout << "Root process gathered modified data:\n";
for (int val : gathered_buffer)
cout << val << " ";
cout << endl;
}

MPI_Finalize();
return 0;
}
Output:
mpic++ mpi_scatter_gather.cpp -o mpi_scatter_gather
mpirun -np 4 ./mpi_scatter_gather
Root process initialized data:
1 2 3 4 5 6 7 8
Process 0 received: 1 2
Process 1 received: 3 4
Process 2 received: 5 6
Process 3 received: 7 8
Root process gathered modified data:
1 2 13 14 25 26 37 38
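When the data does not divide evenly among processes, MPI_Scatterv lets the root assign each rank its own count and offset. A hedged sketch (an assumption, with rank i receiving i + 1 elements; the root's send_buffer must hold at least size*(size+1)/2 values):

// Sketch: uneven scatter with per-rank counts and displacements
int my_count = rank + 1;                       // elements this rank will receive
vector<int> counts(size), displs(size);
for (int i = 0, off = 0; i < size; ++i) {      // significant only at the root
    counts[i] = i + 1;
    displs[i] = off;
    off += i + 1;
}
vector<int> chunk(my_count);
MPI_Scatterv(send_buffer.data(), counts.data(), displs.data(), MPI_INT,
             chunk.data(), my_count, MPI_INT, 0, MPI_COMM_WORLD);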
Program 9
Write an MPI program to demonstrate MPI_Reduce and MPI_Allreduce
(MPI_MAX, MPI_MIN, MPI_SUM, MPI_PROD).
#include <mpi.h>
#include <iostream>

using namespace std;

int main(int argc, char *argv[]) {
int rank, size;
int value;

MPI_Init(&argc, &argv); // Initialize MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank); // Get process rank
MPI_Comm_size(MPI_COMM_WORLD, &size); // Get total number of processes

// Each process starts with a unique value (e.g., rank + 1)
value = rank + 1;
cout << "Process " << rank << " has value: " << value << endl;

int result;

// ==== MPI_Reduce ====

if (rank == 0) cout << "\nMPI_Reduce Results (only at root process):\n";

// SUM
MPI_Reduce(&value, &result, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0) cout << "SUM: " << result << endl;
// PRODUCT
MPI_Reduce(&value, &result, 1, MPI_INT, MPI_PROD, 0, MPI_COMM_WORLD);
if (rank == 0) cout << "PROD: " << result << endl;

// MAX
MPI_Reduce(&value, &result, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
if (rank == 0) cout << "MAX: " << result << endl;

// MIN
MPI_Reduce(&value, &result, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
if (rank == 0) cout << "MIN: " << result << endl;

// ==== MPI_Allreduce ====

int all_result;

if (rank == 0) cout << "\nMPI_Allreduce Results (on all processes):\n";

// SUM
MPI_Allreduce(&value, &all_result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
cout << "Process " << rank << " sees SUM: " << all_result << endl;

// PRODUCT
MPI_Allreduce(&value, &all_result, 1, MPI_INT, MPI_PROD, MPI_COMM_WORLD);
cout << "Process " << rank << " sees PROD: " << all_result << endl;

// MAX
MPI_Allreduce(&value, &all_result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
cout << "Process " << rank << " sees MAX: " << all_result << endl;
// MIN
MPI_Allreduce(&value, &all_result, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD);
cout << "Process " << rank << " sees MIN: " << all_result << endl;

MPI_Finalize(); // Clean up the MPI environment

return 0;
}
Output:
mpic++ mpi_reduce_allreduce.cpp -o mpi_reduce_allreduce
mpirun -np 4 ./mpi_reduce_allreduce
Process 0 has value: 1
Process 1 has value: 2
Process 2 has value: 3
Process 3 has value: 4

MPI_Reduce Results (only at root process):
SUM: 10
PROD: 24
MAX: 4
MIN: 1

MPI_Allreduce Results (on all processes):
Process 0 sees SUM: 10
Process 0 sees PROD: 24
Process 0 sees MAX: 4
Process 0 sees MIN: 1
Process 1 sees SUM: 10
Process 1 sees PROD: 24
Process 1 sees MAX: 4
Process 1 sees MIN: 1
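When the input value is no longer needed, MPI_IN_PLACE lets the reduction overwrite it and avoids a separate result variable. A short sketch (an addition to the program above):

// Sketch: in-place all-reduce; after the call, value holds the global sum on every rank
MPI_Allreduce(MPI_IN_PLACE, &value, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
cout << "Process " << rank << " now holds the SUM: " << value << endl;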
