Experiment 1: Study the Basics of the OpenMP API
#include <stdio.h>
#include <omp.h>
int main() {
    int size;

    // Get the size of the matrices from the user
    printf("Enter the size of the matrices: ");
    scanf("%d", &size);

    int A[size][size], B[size][size], C[size][size];

    // Get matrix elements from the user for matrices A and B
    printf("Enter elements for Matrix A:\n");
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            printf("A[%d][%d]: ", i, j);
            scanf("%d", &A[i][j]);
        }
    }

    printf("Enter elements for Matrix B:\n");
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            printf("B[%d][%d]: ", i, j);
            scanf("%d", &B[i][j]);
        }
    }

    // Multiply matrices A and B in parallel: collapse(2) merges the two
    // outer loops into one iteration space that OpenMP divides among threads
    #pragma omp parallel for shared(A, B, C) collapse(2)
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            C[i][j] = 0;
            for (int k = 0; k < size; ++k) {
                C[i][j] += A[i][k] * B[k][j];
            }
        }
    }

    // Display the result matrix C
    printf("\nResult Matrix C:\n");
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            printf("%d ", C[i][j]);
        }
        printf("\n");
    }

    return 0;
}
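To build and run the program (the file name matmul_omp.c is only an assumption), compile with GCC's OpenMP flag and optionally set the thread count through the standard OMP_NUM_THREADS environment variable:

gcc -fopenmp matmul_omp.c -o matmul_omp
OMP_NUM_THREADS=4 ./matmul_omp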
Experiment 2: Message Passing Interface (MPI)
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[]) {
    int pid, np, elements_per_process, n_elements_received;
    MPI_Status status;

    // Creation of parallel processes
    MPI_Init(&argc, &argv);

    // find out process ID and how many processes were started
    MPI_Comm_rank(MPI_COMM_WORLD, &pid);
    MPI_Comm_size(MPI_COMM_WORLD, &np);

    // master process
    if (pid == 0) {
        int n;
        printf("Enter the size of the array: ");
        scanf("%d", &n);

        // dynamically allocate array 'a'
        int* a = (int*)malloc(n * sizeof(int));

        // input array elements
        printf("Enter %d elements for the array:\n", n);
        for (int i = 0; i < n; i++)
            scanf("%d", &a[i]);

        // split the array evenly among the processes (assumes n >= np)
        int i, index;
        elements_per_process = n / np;

        // if more than one process is running, distribute the work
        if (np > 1) {
            // distribute the portion of the array
            // to child processes to calculate
            // their partial sums
            for (i = 1; i < np - 1; i++) {
                index = i * elements_per_process;
                MPI_Send(&elements_per_process, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
                MPI_Send(&a[index], elements_per_process, MPI_INT, i, 0, MPI_COMM_WORLD);
            }

            // last process adds the remaining elements
            index = i * elements_per_process;
            int elements_left = n - index;
            MPI_Send(&elements_left, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
            MPI_Send(&a[index], elements_left, MPI_INT, i, 0, MPI_COMM_WORLD);
        }

        // master process adds its own sub-array
        int sum = 0;
        for (i = 0; i < elements_per_process; i++)
            sum += a[i];

        // collects partial sums from other processes
        int tmp;
        for (i = 1; i < np; i++) {
            MPI_Recv(&tmp, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
            int sender = status.MPI_SOURCE;
            sum += tmp;
        }

        // prints the final sum of the array
        printf("Sum of array is: %d\n", sum);

        // free dynamically allocated memory
        free(a);
    }
    // slave processes
    else {
        MPI_Recv(&n_elements_received, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);

        // dynamically allocate array 'a2'
        int* a2 = (int*)malloc(n_elements_received * sizeof(int));

        // store the received array segment in local array 'a2'
        MPI_Recv(a2, n_elements_received, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);

        // calculate the partial sum
        int partial_sum = 0;
        for (int i = 0; i < n_elements_received; i++)
            partial_sum += a2[i];

        // send the partial sum to the root process
        MPI_Send(&partial_sum, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);

        // free dynamically allocated memory
        free(a2);
    }

    // cleans up all MPI state before the exit of the process
    MPI_Finalize();
    return 0;
}
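To build and run (the file name array_sum_mpi.c and the process count of 4 are only assumptions), use the compiler wrapper and launcher supplied by the MPI installation (for example Open MPI or MPICH):

mpicc array_sum_mpi.c -o array_sum_mpi
mpirun -np 4 ./array_sum_mpi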
Experiment 3: RMI Techniques
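Remote Method Invocation (RMI) lets a client call a method on an object that lives in another process or on another machine as though it were a local call: a client-side proxy (stub) forwards the call over the network and a server-side dispatcher executes it and returns the result. The classic implementation is Java RMI; as a stand-in that shows the same idea, the sketch below uses Python's standard-library xmlrpc modules (the add_numbers method, port 8000, and the file names are illustrative assumptions).

# rmi_server.py - registers a method that remote clients can invoke
# (file names, port, and method name are illustrative, not fixed by the experiment)
from xmlrpc.server import SimpleXMLRPCServer

def add_numbers(a, b):
    # executed on the server when a client invokes it remotely
    return a + b

server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(add_numbers, "add_numbers")
print("Remote-invocation server listening on port 8000...")
server.serve_forever()

# rmi_client.py - invokes the remote method as if it were local
import xmlrpc.client

proxy = xmlrpc.client.ServerProxy("http://localhost:8000/")
print("3 + 4 =", proxy.add_numbers(3, 4))

Run the server first, then the client in a second terminal; the client only knows the method name and arguments, never the server-side implementation.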
Experiment 4: Publisher/Subscriber Paradigm
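In the publisher/subscriber paradigm, publishers send messages to named topics without knowing who will receive them, and subscribers register interest in topics without knowing who publishes; a broker in between delivers each message to every current subscriber of its topic, keeping the two sides decoupled. Production systems normally use an external broker such as an MQTT or AMQP server; the sketch below keeps the broker in-process purely to show the pattern (the Broker class, the topic name "news", and the file name are illustrative assumptions).

# pubsub_demo.py - minimal in-process publisher/subscriber sketch
# (class, topic, and file names are illustrative)
class Broker:
    def __init__(self):
        self.subscribers = {}  # topic -> list of callback functions

    def subscribe(self, topic, callback):
        # a subscriber registers a callback for a topic
        self.subscribers.setdefault(topic, []).append(callback)

    def publish(self, topic, message):
        # the publisher hands the message to the broker only;
        # the broker delivers it to every subscriber of the topic
        for callback in self.subscribers.get(topic, []):
            callback(message)

broker = Broker()
broker.subscribe("news", lambda m: print("subscriber 1 received:", m))
broker.subscribe("news", lambda m: print("subscriber 2 received:", m))
broker.publish("news", "both subscribers get this without the publisher knowing them")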
Experiment 5: Web Service using Flask
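A web service exposes functionality over HTTP so that any client (a browser, curl, or another program) can call it with ordinary requests and receive structured responses such as JSON. A minimal sketch of such a service with Flask is shown below (the routes /hello and /add, port 5000, and the file name app.py are illustrative assumptions).

# app.py - minimal Flask web service sketch
# (route names, port, and file name are illustrative)
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route("/hello", methods=["GET"])
def hello():
    # simple GET endpoint returning a JSON message
    return jsonify({"message": "Hello from the Flask web service"})

@app.route("/add", methods=["GET"])
def add():
    # read query parameters, e.g. /add?a=2&b=3
    a = request.args.get("a", type=int, default=0)
    b = request.args.get("b", type=int, default=0)
    return jsonify({"sum": a + b})

if __name__ == "__main__":
    app.run(port=5000, debug=True)

After installing Flask (pip install flask) and starting the service with python app.py, it can be exercised with, for example, curl "http://localhost:5000/add?a=2&b=3".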