ADA Lab File — Analysis and Design of Algorithms
1. Sorting Algorithms
a) Merge Sort
Explanation:
Merge Sort is a divide-and-conquer algorithm that splits an array into halves, recursively
sorts each half, and merges the sorted halves to produce the final sorted array.
Code:
#include <iostream>
using namespace std;
// NOTE(review): truncated snippet — the head of the merge helper (its
// declaration plus the code that copies arr[left..right] into temporary
// arrays L and R of sizes n1 and n2) was lost in extraction; only the
// merge loop survives below.
// Merge step: repeatedly move the smaller front element of L and R into arr.
int i = 0, j = 0, k = left;
while (i < n1 && j < n2) {
if (L[i] <= R[j]) arr[k++] = L[i++];
else arr[k++] = R[j++];
}
// Drain whichever temporary still has elements (at most one loop runs).
while (i < n1) arr[k++] = L[i++];
while (j < n2) arr[k++] = R[j++];
}
int main() {
    // Driver: merge-sort a fixed sample array and print it in order.
    int data[] = {12, 11, 13, 5, 6, 7};
    const int count = sizeof(data) / sizeof(data[0]);
    mergeSort(data, 0, count - 1);
    for (int idx = 0; idx < count; ++idx)
        cout << data[idx] << " ";
    return 0;
}
b) Quick Sort
Explanation:
Quick Sort is a divide-and-conquer algorithm that selects a pivot, partitions the array around
the pivot, and recursively sorts the partitions.
Code:
#include <iostream>
using namespace std;
int main() {
    // Driver: quick-sort a fixed sample array and print the sorted result.
    int values[] = {10, 7, 8, 9, 1, 5};
    const int count = sizeof(values) / sizeof(values[0]);
    quickSort(values, 0, count - 1);
    for (int pos = 0; pos < count; ++pos)
        cout << values[pos] << " ";
    return 0;
}
c) Bubble Sort
Explanation:
Bubble Sort repeatedly steps through the list, compares adjacent elements, and swaps them if
they are in the wrong order.
Code:
#include <iostream>
using namespace std;
int main() {
    // Driver: bubble-sort a fixed sample array and print the sorted result.
    int values[] = {64, 34, 25, 12, 22, 11, 90};
    const int count = sizeof(values) / sizeof(values[0]);
    bubbleSort(values, count);
    for (int pos = 0; pos < count; ++pos)
        cout << values[pos] << " ";
    return 0;
}
Merge Sort
______________
Input: [12, 11, 13, 5, 6, 7]
Output: [ 5, 6, 7, 11, 12, 13]
Quick Sort
______________
Input: [10, 7, 8, 9, 1, 5]
Output: [ 1, 5, 7, 8, 9, 10]
Bubble Sort
______________
Input: [64, 34, 25, 12, 22, 11, 90]
Output: [11, 12, 22, 25, 34, 64, 90]
2. Search Algorithms
a) Linear Search
Explanation:
Linear Search iteratively checks each element in the array to find the target.
Code:
#include <iostream>
using namespace std;
int main() {
    // Driver: look for x in arr and report where (or whether) it occurs.
    int arr[] = {2, 3, 4, 10, 40};
    int n = sizeof(arr) / sizeof(arr[0]);  // compute size instead of hard-coding 5
    int x = 10;
    int result = linearSearch(arr, n, x);
    // Fix: the original unconditionally printed "found at index -1"
    // when the target was absent (linearSearch's not-found sentinel).
    if (result == -1)
        cout << "Element not found";
    else
        cout << "Element found at index " << result;
    return 0;
}
b) Binary Search
Explanation:
Binary Search divides the sorted array into halves to locate the target element efficiently.
Code:
#include <iostream>
using namespace std;
int main() {
    // Driver: binary-search the sorted array for x and report the result.
    int arr[] = {2, 3, 4, 10, 40};
    int x = 10;
    int n = sizeof(arr) / sizeof(arr[0]);
    int result = binarySearch(arr, 0, n - 1, x);
    // Fix: the original unconditionally printed "found at index -1"
    // when the target was absent (binarySearch's not-found sentinel).
    if (result == -1)
        cout << "Element not found";
    else
        cout << "Element found at index " << result;
    return 0;
}
Linear Search
______________
Input: Array: [2, 3, 4, 10, 40], Target: 10
Output: Element found at index 3
Binary Search
______________
Input: Sorted Array: [2, 3, 4, 10, 40], Target: 10
Output: Element found at index 3
3. Huffman Coding
Explanation:
Huffman Coding is a greedy algorithm used for data compression, where frequently
occurring characters are represented with shorter codes.
Code:
#include <iostream>
#include <queue>
#include <vector>
#include <map>
using namespace std;
// Huffman-tree node: a leaf holds a source character and its frequency;
// internal nodes (built in the merge loop below) carry the summed frequency
// of their two children and use '$' as a placeholder character.
// NOTE(review): truncated snippet — Node's constructor and closing brace
// were lost in extraction (the code below calls `new Node('$', ...)`).
struct Node {
char ch;
int freq;
Node* left;
Node* right;
// Orders the priority queue as a min-heap on frequency (std::priority_queue
// is a max-heap by default, so the comparison is inverted).
struct Compare {
bool operator()(Node* l, Node* r) {
return l->freq > r->freq;
}
};
// NOTE(review): truncated snippet — the head of HuffmanCodes(), which pushes
// one leaf Node per (character, frequency) pair into minHeap, and the
// printCodes() helper are missing from this extract.
// Greedy step: repeatedly merge the two least-frequent trees until a single
// tree (the Huffman tree) remains in the heap.
while (minHeap.size() != 1) {
Node* left = minHeap.top(); minHeap.pop();
Node* right = minHeap.top(); minHeap.pop();
// '$' marks an internal node; its frequency is the sum of its children's.
Node* top = new Node('$', left->freq + right->freq);
top->left = left;
top->right = right;
minHeap.push(top);
}
// Walk the finished tree and print each character's code.
printCodes(minHeap.top(), "");
}
int main() {
    // Driver: build Huffman codes for six characters with known frequencies.
    char symbols[] = {'a', 'b', 'c', 'd', 'e', 'f'};
    int freq[] = {5, 9, 12, 13, 16, 45};
    const int count = sizeof(symbols) / sizeof(symbols[0]);
    HuffmanCodes(symbols, freq, count);
    return 0;
}
• Complexity: O(n log n) due to priority queue operations for merging nodes
Huffman Coding
______________
Input: Characters: [a, b, c, d, e, f], Frequencies: [5, 9, 12, 13, 16, 45]
Output:
f: 0
c: 100
d: 101
a: 1100
b: 1101
e: 111
4. Minimum Spanning Tree
Kruskal’s Algorithm
Explanation:
Kruskal's algorithm finds a Minimum Spanning Tree (MST) by selecting edges in ascending
order of weight, ensuring that no cycles are formed.
Code:
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
// One weighted edge of the input graph; an aggregate so the driver can
// brace-initialize it as {src, dest, weight}.
struct Edge {
    int src;     // tail vertex
    int dest;    // head vertex
    int weight;  // edge cost used to order edges in Kruskal's algorithm
};
// The whole input graph as an explicit edge list (Kruskal's algorithm needs
// no adjacency structure — only the edges, later sorted by weight).
struct Graph {
    int V;               // number of vertices
    int E;               // number of edges
    vector<Edge> edges;  // all E edges of the graph
};
// Disjoint-set (union–find) structure with path compression (in find) and
// union by rank (in merge), as used by Kruskal's cycle test.
// Fix: the original allocated parent/rank with raw new[] and never freed
// them (no destructor — a leak); std::vector restores the Rule of Zero.
struct DisjointSets {
    std::vector<int> parent;  // parent[i] == i  <=>  i is a set root
    std::vector<int> rank;    // upper bound on the height of i's tree

    // Build n+1 singleton sets so vertices may be numbered 0..n inclusive.
    explicit DisjointSets(int n) : parent(n + 1), rank(n + 1, 0) {
        for (int i = 0; i <= n; i++)
            parent[i] = i;
    }

    // Return the representative (root) of u's set, compressing the path
    // so later lookups are near O(1).
    int find(int u) {
        if (u != parent[u])
            parent[u] = find(parent[u]);
        return parent[u];
    }

    // Merge the sets containing u and v, attaching the lower-rank tree
    // under the higher-rank one to keep the trees shallow.
    void merge(int u, int v) {
        u = find(u), v = find(v);
        if (rank[u] > rank[v]) parent[v] = u;
        else parent[u] = v;
        if (rank[u] == rank[v]) rank[v]++;
    }
};
// NOTE(review): truncated snippet — the head of KruskalMST(graph), which
// presumably sorts graph.edges by ascending weight and constructs the
// DisjointSets ds used below, was lost in extraction.
// Greedy selection: scan edges (cheapest first) and keep each edge that
// connects two different components.
vector<Edge> mst;
for (Edge& edge : graph.edges) {
int uRep = ds.find(edge.src);
int vRep = ds.find(edge.dest);
// Different roots => adding this edge cannot form a cycle.
if (uRep != vRep) {
mst.push_back(edge);
ds.merge(uRep, vRep);
}
}
cout << "Edges in the MST:\n";
for (Edge& edge : mst) {
cout << edge.src << " - " << edge.dest << ": " << edge.weight << endl;
}
}
int main() {
Graph graph = {4, 5, {{0, 1, 10}, {0, 2, 6}, {0, 3, 5}, {1, 3, 15}, {2, 3, 4}}};
KruskalMST(graph);
return 0;
}
• Complexity: O(E log E), where E is the number of edges (due to sorting).
OUTPUT
Kruskal's Algorithm
______________
Input: Vertices: 4, Edges: [(0,1,10), (0,2,6), (0,3,5), (1,3,15), (2,3,4)]
Output:
Edges in the MST:
2 - 3: 4
0 - 3: 5
0 - 1: 10
5. Dijkstra's Algorithm
Explanation:
Dijkstra’s algorithm finds the shortest path from a source node to all other nodes in a
weighted graph with non-negative weights using a priority queue.
Code:
#include <iostream>
#include <vector>
#include <queue>
using namespace std;
// NOTE(review): truncated snippet — the head of dijkstra(graph, src), which
// presumably sizes dist[] to V with an "infinity" sentinel, sets
// dist[src] = 0, and seeds the min-priority queue pq with {0, src}, is
// missing from this extract.
// pq holds {distance, vertex} pairs; top().second is the vertex to settle.
while (!pq.empty()) {
int u = pq.top().second;
pq.pop();
// Relax every outgoing edge; graph[u] stores {weight, neighbor} pairs
// (see the adjacency lists built in main below).
for (auto& [weight, v] : graph[u]) {
if (dist[u] + weight < dist[v]) {
dist[v] = dist[u] + weight;
pq.push({dist[v], v});
}
}
}
cout << "Vertex Distance from Source " << src << "\n";
for (int i = 0; i < V; i++) cout << i << ": " << dist[i] << "\n";
}
int main() {
int V = 5;
vector<vector<pair<int, int>>> graph(V);
graph[0] = {{2, 1}, {4, 2}};
graph[1] = {{7, 3}};
graph[2] = {{3, 3}};
graph[3] = {{1, 4}};
graph[4] = {};
dijkstra(graph, 0);
return 0;
}
• Complexity: O((V + E) log V), where V is the number of vertices and E is the
number of edges.
Dijkstra's Algorithm
______________
Input: Graph: 0 -> (1, 2), (2, 4), 1 -> (3, 7), 2 -> (3, 3), 3 -> (4, 1), 4 -> None
Output:
Vertex Distance from Source 0
0: 0
1: 2
2: 4
3: 7
4: 8
6. Bellman-Ford Algorithm
Explanation:
Bellman-Ford algorithm finds the shortest paths from a source vertex to all other vertices in a
graph with possible negative weights.
Code:
#include <iostream>
#include <vector>
using namespace std;
// One directed, weighted edge; an aggregate so the driver can list edges
// as {src, dest, weight} triples (negative weights are allowed here).
struct Edge {
    int src;     // tail vertex
    int dest;    // head vertex
    int weight;  // edge cost (may be negative for Bellman-Ford)
};
// NOTE(review): truncated snippet — only the tail of
// bellmanFord(edges, V, src) survives: the V-1 rounds of edge relaxation
// (and any negative-cycle check) were lost in extraction. This part prints
// the finished distance table.
cout << "Vertex Distance from Source " << src << "\n";
for (int i = 0; i < V; i++) cout << i << ": " << dist[i] << "\n";
}
int main() {
int V = 5;
vector<Edge> edges = {{0, 1, -1}, {0, 2, 4}, {1, 2, 3}, {1, 3, 2}, {1, 4, 2}, {3, 2, 5}, {3, 1, 1}, {4, 3, -3}};
bellmanFord(edges, V, 0);
return 0;
}
• Complexity: O(V * E), where V is the number of vertices and E is the number of
edges.
OUTPUT
Bellman-Ford Algorithm
______________
Input: Graph: Vertices: 5, Edges: [(0,1,-1), (0,2,4), (1,2,3), (1,3,2), (1,4,2), (3,2,5), (3,1,1), (4,3,-3)]
Output:
Vertex Distance from Source 0
0: 0
1: -1
2: 2
3: -2
4: 1
7. N Queen’s Problem (Backtracking)
Explanation:
The N Queen’s problem is solved by placing N queens on an NxN chessboard such that no
two queens threaten each other using backtracking.
Code:
#include <iostream>
using namespace std;
int main() {
    // Board size. Fix: the original's non-const `int N = 4;` made
    // `int board[N]` a variable-length array — a compiler extension, not
    // standard C++; constexpr makes it a plain fixed-size array.
    constexpr int N = 4;
    int board[N];  // board[row] = column of the queen placed in that row
    solveNQueens(board, 0, N);
    return 0;
}
N Queen’s Problem
______________
Input: N: 4
Output:
[2, 4, 1, 3]
[3, 1, 4, 2]
8. Matrix Multiplication
Explanation:
Matrix Multiplication involves multiplying two matrices and producing a resultant matrix.
Code:
#include <iostream>
using namespace std;
int main() {
    // Driver: multiply two fixed 2x2 matrices and print the product.
    int lhs[2][2] = {{1, 2}, {3, 4}};
    int rhs[2][2] = {{5, 6}, {7, 8}};
    matrixMultiplication(lhs, rhs);
    return 0;
}
Matrix Multiplication
______________
Input: Matrix A: [1, 2], [3, 4], Matrix B: [5, 6], [7, 8]
Output:
Resultant Matrix:
[19, 22]
[43, 50]
9. Longest Common Subsequence (LCS)
Explanation:
The LCS algorithm finds the longest subsequence common to two sequences using dynamic
programming.
Code:
#include <iostream>
#include <string>
using namespace std;
int main() {
string X = "AGGTAB";
string Y = "GXTXAYB";
cout << "Length of LCS is " << lcs(X, Y) << endl;
return 0;
}
• Complexity: O(m * n), where m and n are the lengths of the sequences.
OUTPUT
10. String Matching Algorithms
a) Naïve String Matching
Explanation:
The Naïve String Matching algorithm checks for a match by comparing each substring of the
text with the pattern.
Code:
#include <iostream>
#include <string>
using namespace std;
int main() {
string text = "AABAACAADAABAABA";
string pattern = "AABA";
naiveSearch(text, pattern);
return 0;
}
• Complexity: O((n - m + 1) * m), where n is the length of the text and m is the length
of the pattern.
OUTPUT
b) Rabin-Karp Algorithm
Explanation:
The Rabin-Karp algorithm uses a hash function to compute the hash of the pattern and
compare it with hashes of substrings in the text, reducing unnecessary comparisons.
Code:
#include <iostream>
#include <string>
using namespace std;
// Rabin–Karp rolling-hash parameters.
const int d = 256;  // alphabet size (radix); was `#define d 256` — a typed
                    // constant is scoped and type-checked, unlike a macro
const int q = 101;  // a prime modulus that keeps hash values small
int main() {
string text = "GEEKS FOR GEEKS";
string pattern = "GEEK";
rabinKarp(text, pattern);
return 0;
}
Time Complexity Analysis:
Rabin-Karp Algorithm
______________
Input: Text: "BEERS FOR BEERS", Pattern: "BEER"
Output:
Pattern found at index 0
Pattern found at index 10
c) Knuth-Morris-Pratt (KMP) Algorithm
Explanation:
The KMP algorithm preprocesses the pattern to create a "longest prefix suffix" (LPS) array,
which helps skip unnecessary comparisons in the search process.
Code:
#include <iostream>
#include <string>
#include <vector>
using namespace std;
// NOTE(review): stray statement — an extraction artifact. In the complete
// listing this call appears inside KMPSearch(), building the pattern's
// longest-prefix-suffix (LPS) table before the text scan; the enclosing
// function body is missing here.
computeLPSArray(pattern, lps);
int main() {
string text = "ABABDABACDABABCABAB";
string pattern = "ABABCABAB";
KMPSearch(text, pattern);
return 0;
}