22331A0550 (CSE-A): AITT LAB SHEET

WEEK-1

1. Create a list of five elements. Pass the list to a function and compute the average of the five numbers.

def average(numbers):
    return sum(numbers)/len(numbers)

numbers = [10, 20, 30, 40, 50]
avg = average(numbers)
print(f"The average of the numbers is: {avg}")

The average of the numbers is: 30.0

2. Write a program that prompts the user to enter the elements of a list and adds each element to the list. Write functions maximum(lst) and minimum(lst) to find the maximum and minimum numbers in the list.

n = int(input("Enter no.of Elements :- "))
listt = []
for i in range(n):
    i = int(input("Enter the number :-"))
    listt.append(i)

def maximum(l):
    max = l[0]
    for i in range(len(l)):
        if max < l[i]:
            max = l[i]
    print("Maximum of given list :- ", max)

def minimum(l):
    min = l[0]
    for i in range(len(l)):
        if min > l[i]:
            min = l[i]
    print("Minimum of given list :- ", min)

maximum(listt)
minimum(listt)

Enter no.of Elements :- 4
Enter the number :-1
Enter the number :-2
Enter the number :-3
Enter the number :-4
Maximum of given list :- 4
Minimum of given list :- 1

3. Write a function print_reverse(lst) to reverse the elements of a list.

def reverse(lst):
    reversed_list = []
    for i in range(len(lst) - 1, -1, -1):
        reversed_list.append(lst[i])
    return reversed_list

my_list = [1, 2, 3, 4, 5]
reversed_list = reverse(my_list)
print(reversed_list)

[5, 4, 3, 2, 1]

4. Write a program to return the prime numbers from a list.

def is_prime(n):
    if n < 2:
        return False  # 0 and 1 are not prime
    for i in range(2, int(n**0.5) + 1):
        if n % i == 0:
            return False
    return True

def get_prime_numbers(list1):
    prime_numbers = []
    for num in list1:
        if is_prime(num):
            prime_numbers.append(num)
    return prime_numbers

list1 = [2, 3, 4, 5, 6, 7, 8, 9, 10]
prime_numbers = get_prime_numbers(list1)
print(prime_numbers)

[2, 3, 5, 7]

WEEK-2

1. BFS

from collections import deque

def bfs(graph, start):
    visited = set()
    queue = deque([start])
    visited.add(start)
    while queue:
        vertex = queue.popleft()
        print(vertex, end=' ')
        for neighbor in graph[vertex]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append(neighbor)

if __name__ == "__main__":
    graph = {
        'A': ['B', 'C'],
        'B': ['A', 'D', 'E'],
        'C': ['A', 'F'],
        'D': ['B'],
        'E': ['B', 'F'],
        'F': ['C', 'E']
    }
    print("BFS starting from vertex 'D':")
    bfs(graph, 'D')

BFS starting from vertex 'D':
D B A E C F

2. DFS

def dfs(graph, start):
    visited = set()
    stack = [start]
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.add(vertex)
            print(vertex)
            for neighbor in reversed(graph[vertex]):
                if neighbor not in visited:
                    stack.append(neighbor)
    return visited

graph = {
    'A': ['B', 'C'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B'],
    'E': ['B', 'F'],
    'F': ['C', 'E']
}
print("DFS traversal starting from vertex 'A':")
dfs(graph, 'A')

DFS traversal starting from vertex 'A':
A
B
D
E
F
C
{'A', 'B', 'C', 'D', 'E', 'F'}
3. DLS

def depth_limited_search(node, goal, depth_limit):
    if node == goal:
        return [node]
    elif depth_limit == 0:
        return None
    else:
        for child in get_children(node):
            result = depth_limited_search(child, goal, depth_limit - 1)
            if result is not None:
                return [node] + result
        return None

graph = {
    'A': ['B', 'C'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B'],
    'E': ['B', 'F'],
    'F': ['C', 'E']
}

def get_children(node):
    return graph.get(node, [])

start_node = 'A'
goal_node = 'F'
depth_limit = 2

result = depth_limited_search(start_node, goal_node, depth_limit)
if result is not None:
    print(f"Path from {start_node} to {goal_node}: {result}")
else:
    print(f"No path found within depth limit of {depth_limit}")

Path from A to F: ['A', 'C', 'F']

WEEK-3

1. Write a program to perform NumPy operations in Python: type, length, ndim, shape, reshape, arange, itemsize, dtype.

import numpy as np
x = np.array([1,2,3,4,5,6])
print(f"Type : {type(x)}")
print(f"Length : {len(x)}")
print(f"No of dimensions in array : {x.ndim}")
print(f"Shape : {x.shape}")
rs = x.reshape((2,3))
print(f"Shape of Reshaped array : {rs.shape}")
print(f"Reshaped array : {rs}")
print(f"Original array : {x}")
h = np.array([[1,2,3],[4,5,6]])
print(h)
print(h.ndim)

Type : <class 'numpy.ndarray'>
Length : 6
No of dimensions in array : 1
Shape : (6,)
Shape of Reshaped array : (2, 3)
Reshaped array : [[1 2 3]
 [4 5 6]]
Original array : [1 2 3 4 5 6]
[[1 2 3]
 [4 5 6]]
2
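The task also lists arange, itemsize and dtype, which the cell above does not exercise. A minimal sketch of those three (the array values here are illustrative, not from the lab sheet):

import numpy as np

ar = np.arange(0, 12, 2)   # evenly spaced values: [ 0  2  4  6  8 10]
print(f"arange result : {ar}")
print(f"Item size (bytes per element) : {ar.itemsize}")
print(f"Data type : {ar.dtype}")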

2. Write a program in Python to perform NumPy arithmetic operations: add, subtract, multiply, divide, dot, sin, cos, tan, sum, transpose, power.

import numpy as np
a = np.array([90,30,8,9,10])
b = np.array([1,2,3,4,5])
print(a+b)
print(a-b)
print(a*b)
print(a/b)
print(np.dot(a,b))
print(np.sin(a))
print(np.cos(a))
print(np.tan(a))
print(np.sum(a))
print(np.transpose(a))

[91 32 11 13 15]
[89 28  5  5  5]
[90 60 24 36 50]
[90.         15.          2.66666667  2.25        2.        ]
260
[ 0.89399666 -0.98803162  0.98935825  0.41211849 -0.54402111]
[-0.44807362  0.15425145 -0.14550003 -0.91113026 -0.83907153]
[-1.99520041 -6.4053312  -6.79971146 -0.45231566  0.64836083]
147
[90 30  8  9 10]
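power is listed in the task but not called in the cell above; a one-line addition would cover it:

print(np.power(a, 2))   # element-wise square of a: [8100  900   64   81  100]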


WEEK-4

1. Performing exploratory data analysis on the iris.csv data set.

import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv("/content/Iris (3).csv")
print(data)

      Id  SepalLengthCm  SepalWidthCm  PetalLengthCm  PetalWidthCm         Species
0      1            5.1           3.5            1.4           0.2     Iris-setosa
1      2            4.9           3.0            1.4           0.2     Iris-setosa
2      3            4.7           3.2            1.3           0.2     Iris-setosa
3      4            4.6           3.1            1.5           0.2     Iris-setosa
4      5            5.0           3.6            1.4           0.2     Iris-setosa
..   ...            ...           ...            ...           ...             ...
145  146            6.7           3.0            5.2           2.3  Iris-virginica
146  147            6.3           2.5            5.0           1.9  Iris-virginica
147  148            6.5           3.0            5.2           2.0  Iris-virginica
148  149            6.2           3.4            5.4           2.3  Iris-virginica
149  150            5.9           3.0            5.1           1.8  Iris-virginica

[150 rows x 6 columns]

import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(3,3))
sns.scatterplot(x="SepalLengthCm", y="SepalWidthCm", data=data)
plt.show()

plt.figure(figsize=(3,3))
sns.countplot(x='Species', data=data)
plt.show()

plt.figure(figsize=(2,2))
plt.stem(data['SepalLengthCm'])
plt.show()

plt.figure(figsize=(2,2))
sns.histplot(data['SepalLengthCm'])
plt.show()

data.columns.tolist()

['Id',
 'SepalLengthCm',
 'SepalWidthCm',
 'PetalLengthCm',
 'PetalWidthCm',
 'Species']

data.nunique()

Id               150
SepalLengthCm     35
SepalWidthCm      23
PetalLengthCm     43
PetalWidthCm      22
Species            3
dtype: int64

numerical_df = data.drop(['Species'], axis=1)
sns.heatmap(numerical_df.corr(), annot=True)
plt.show()

sns.pairplot(data, hue='Species')
plt.show()

plt.figure(figsize=(2,2))
sns.rugplot(data['SepalLengthCm'])
plt.show()

plt.figure(figsize=(2,2))
sns.jointplot(x='SepalLengthCm', y='SepalWidthCm', data=data)
plt.show()

<Figure size 200x200 with 0 Axes>

WEEK-5

Implementing pre-processing techniques on data (using Pandas, NumPy). Performing exploratory data analysis on the Toyota cars data set:

1. Dealing with missing values and deviations in the dataset.

2. Importing data and working with pandas dataframes.

import matplotlib.pyplot as plt
import pandas as pd
data = {'Name': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'],
        'Rollno': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        'SGPA': [9.18, 5.88, 7.89, 4.68, 9.54, 7.48, 8.5, 7.33, 8.44, 10]}
df = pd.DataFrame(data)
print(df)
plt.figure(figsize=(2,2))
plt.bar(df['Name'], df['SGPA'])
plt.show()
plt.figure(figsize=(2,2))
plt.pie(df['SGPA'], labels=df['Name'], autopct='%1.1f%%')
plt.show()

  Name  Rollno   SGPA
0    A       1   9.18
1    B       2   5.88
2    C       3   7.89
3    D       4   4.68
4    E       5   9.54
5    F       6   7.48
6    G       7   8.50
7    H       8   7.33
8    I       9   8.44
9    J      10  10.00

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv("/content/Life Expectancy Data.csv")
print(data)


         Country  Year      Status  Life expectancy   Adult Mortality  \
0    Afghanistan  2015  Developing              65.0            263.0
1    Afghanistan  2014  Developing              59.9            271.0
2    Afghanistan  2013  Developing              59.9            268.0
3    Afghanistan  2012  Developing              59.5            272.0
4    Afghanistan  2011  Developing              59.2            275.0
...          ...   ...         ...               ...              ...
2933    Zimbabwe  2004  Developing              44.3            723.0
2934    Zimbabwe  2003  Developing              44.5            715.0
2935    Zimbabwe  2002  Developing              44.8             73.0
2936    Zimbabwe  2001  Developing              45.3            686.0
2937    Zimbabwe  2000  Developing              46.0            665.0

      infant deaths  Alcohol  percentage expenditure  Hepatitis B  Measles  \
0                62     0.01               71.279624         65.0     1154
1                64     0.01               73.523582         62.0      492
2                66     0.01               73.219243         64.0      430
3                69     0.01               78.184215         67.0     2787
4                71     0.01                7.097109         68.0     3013
...             ...      ...                     ...          ...      ...
2933             27     4.36                0.000000         68.0       31
2934             26     4.06                0.000000          7.0      998
2935             25     4.43                0.000000         73.0      304
2936             25     1.72                0.000000         76.0      529
2937             24     1.68                0.000000         79.0     1483

      ...  Polio  Total expenditure  Diphtheria  HIV/AIDS         GDP  \
0     ...    6.0               8.16        65.0       0.1  584.259210
1     ...   58.0               8.18        62.0       0.1  612.696514
2     ...   62.0               8.13        64.0       0.1  631.744976
3     ...   67.0               8.52        67.0       0.1  669.959000
4     ...   68.0               7.87        68.0       0.1   63.537231
...   ...    ...                ...         ...       ...         ...
2933  ...   67.0               7.13        65.0      33.6  454.366654
2934  ...    7.0               6.52        68.0      36.7  453.351155
2935  ...   73.0               6.53        71.0      39.8   57.348340
2936  ...   76.0               6.16        75.0      42.1  548.587312
2937  ...   78.0               7.10        78.0      43.5  547.358878

      Population  thinness 1-19 years  thinness 5-9 years  \
0     33736494.0                 17.2                17.3
1       327582.0                 17.5                17.5
2     31731688.0                 17.7                17.7
3      3696958.0                 17.9                18.0
4      2978599.0                 18.2                18.2
...          ...                  ...                 ...
2933  12777511.0                  9.4                 9.4
2934  12633897.0                  9.8                 9.9
2935    125525.0                  1.2                 1.3
2936  12366165.0                  1.6                 1.7
2937  12222251.0                 11.0                11.2

      Income composition of resources  Schooling
0                               0.479       10.1
1                               0.476       10.0
2                               0.470        9.9
3                               0.463        9.8
4                               0.454        9.5

from sklearn.impute import KNNImputer

for i in ["Schooling", "Polio", "Income composition of resources", "infant deaths"]:
    data[i].fillna(data[i].median(), inplace=True)
data.isnull().sum()

Country                            0
Year                               0
Status                             0
Life expectancy                    0
Adult Mortality                    0
infant deaths                      0
Alcohol                            0
percentage expenditure             0
Hepatitis B                        0
Measles                            0
BMI                                0
under-five deaths                  0
Polio                              0
Total expenditure                  0
Diphtheria                         0
HIV/AIDS                           0
GDP                                0
Population                         0
thinness 1-19 years                0
thinness 5-9 years                 0
Income composition of resources    0
Schooling                          0
dtype: int64

for column in ["Schooling", "Polio", "Income composition of resources", "infant deaths"]:
    try:
        if column in data.columns:
            data[column].fillna(data[column].median(), inplace=True)
        else:
            print(f"Warning: Column '{column}' not found in the DataFrame.")
    except Exception as e:
        print(f"An error occurred while processing column '{column}': {e}")
data.isnull().sum()

data.head()

       Country  Year      Status  Life expectancy   Adult Mortality  infant deaths  Alcohol  percentage expenditure  Hepatitis B  Measles  ...  Polio  Total expenditure
0  Afghanistan  2015  Developing              65.0            263.0             62     0.01               71.279624         65.0     1154  ...    6.0               8.16
1  Afghanistan  2014  Developing              59.9            271.0             64     0.01               73.523582         62.0      492  ...   58.0               8.18
2  Afghanistan  2013  Developing              59.9            268.0             66     0.01               73.219243         64.0      430  ...   62.0               8.13
3  Afghanistan  2012  Developing              59.5            272.0             69     0.01               78.184215         67.0     2787  ...   67.0               8.52
4  Afghanistan  2011  Developing              59.2            275.0             71     0.01                7.097109         68.0     3013  ...   68.0               7.87

5 rows x 22 columns

from sklearn.impute import KNNImputer
imputer = KNNImputer()
for i in data.select_dtypes(include='number'):
    data[i] = imputer.fit_transform(data[[i]])
data.isnull().sum()

Country                            0
Year                               0
Status                             0
Life expectancy                    0
Adult Mortality                    0
infant deaths                      0
Alcohol                            0
percentage expenditure             0
Hepatitis B                        0
Measles                            0
BMI                                0
under-five deaths                  0
Polio                              0
Total expenditure                  0
Diphtheria                         0
HIV/AIDS                           0
GDP                                0
Population                         0
thinness 1-19 years                0
thinness 5-9 years                 0
Income composition of resources    0
Schooling                          0
dtype: int64
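Note that calling imputer.fit_transform(data[[i]]) one column at a time reduces KNNImputer to a univariate fill, because the imputer never sees the other features. If multivariate KNN imputation is wanted, a sketch of the usual pattern (same imputer, all numeric columns passed at once; n_neighbors=5 is the scikit-learn default):

num_cols = data.select_dtypes(include='number').columns
data[num_cols] = KNNImputer(n_neighbors=5).fit_transform(data[num_cols])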


s = data.select_dtypes(include='number').corr()
plt.figure(figsize=(10,10))
sns.heatmap(s, annot=True)

<Axes: >

for i in data.select_dtypes(include='number').columns:
    sns.scatterplot(x=i, y='Life expectancy ', data=data)
    plt.show()
Applying Binning techniques for data smoothening on dataset

def print_elements_up_to_m_times(lst, n, m):
    # Total number of elements in the list
    total_len = len(lst)
    # Loop for m times or until there are no more elements to process
    for i in range(m):
        # Calculate start and end indices for the current segment
        start = i * 2 * n
        end = start + 2 * n
        # Break the loop if the starting index is beyond the list length
        if start >= total_len:
            print("No more elements to print.")
            break
        # Print the first n elements of the current segment
        first_n = lst[start:start + n]
        print(f"Iteration {i + 1} - First {n} elements:", first_n)
        # Print the next n elements of the current segment
        next_n = lst[start + n:end]
        print(f"Iteration {i + 1} - Next {n} elements:", next_n)

# Example usage
l = [12, 23, 34, 45, 56, 67, 78, 89, 90, 100, 99, 88]
minm = min(l)
maxm = max(l)
x = (maxm - minm) // len(l)
bins = max(len(l), x) - min(len(l), x)
print('no of bins :', bins)
y = int(len(l) / bins)
print('no of elements in each bin :', y)
n = bins
m = y
print_elements_up_to_m_times(l, n, m)

no of bins : 5
no of elements in each bin : 2
Iteration 1 - First 5 elements: [12, 23, 34, 45, 56]
Iteration 1 - Next 5 elements: [67, 78, 89, 90, 100]
Iteration 2 - First 5 elements: [99, 88]
Iteration 2 - Next 5 elements: []
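The cell above only partitions and prints the list; it does not actually smooth the values. A sketch of equal-width binning with smoothing by bin means, using pandas (the bin count of 5 is chosen to match the run above and is an assumption, not part of the lab sheet):

import pandas as pd

values = pd.Series([12, 23, 34, 45, 56, 67, 78, 89, 90, 100, 99, 88])
bin_labels = pd.cut(values, bins=5)                    # equal-width bins over the value range
smoothed = values.groupby(bin_labels).transform('mean')  # replace each value by its bin mean
print(pd.DataFrame({'value': values, 'bin': bin_labels, 'smoothed': smoothed}))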

3. Applying normalization techniques.

def min_max_normalize(data):
    """
    Normalize the data using Min-Max normalization.

    Args:
        data (list of floats or ints): The data to be normalized.

    Returns:
        list of floats: The normalized data.
    """
    min_val = min(data)
    max_val = max(data)
    normalized_data = [(x - min_val) / (max_val - min_val) for x in data]
    return normalized_data

# Example usage
data = [10, 20, 30, 40, 50]
normalized_data = min_max_normalize(data)

print("Original Data: ", data)
print("Min-Max Normalized Data: ", normalized_data)

Original Data:  [10, 20, 30, 40, 50]
Min-Max Normalized Data:  [0.0, 0.25, 0.5, 0.75, 1.0]

import matplotlib.pyplot as plt

def min_max_normalize(data):
    """
    Normalize the data using Min-Max normalization.

    Args:
        data (list of floats or ints): The data to be normalized.

    Returns:
        list of floats: The normalized data.
    """
    min_val = min(data)
    max_val = max(data)
    normalized_data = [(x - min_val) / (max_val - min_val) for x in data]
    return normalized_data

# Example usage
data = [123, 23, 345, 456, 567, 678, 789, 867, 456, 3231, 4342, 456]
normalized_data = min_max_normalize(data)

print("Original Data: ", data)
print("Min-Max Normalized Data: ", normalized_data)

# Plotting the data points before and after normalization
plt.figure(figsize=(12, 6))

# Original data
plt.subplot(1, 2, 1)
plt.plot(data, 'bo-', label='Original Data')
plt.title('Original Data')
plt.xlabel('Index')
plt.ylabel('Value')
plt.legend()

# Normalized data
plt.subplot(1, 2, 2)
plt.plot(normalized_data, 'ro-', label='Normalized Data')
plt.title('Min-Max Normalized Data')
plt.xlabel('Index')
plt.ylabel('Normalized Value')
plt.legend()

plt.tight_layout()
plt.show()

Original Data:  [123, 23, 345, 456, 567, 678, 789, 867, 456, 3231, 4342, 456]
Min-Max Normalized Data:  [0.0231535077564251, 0.0, 0.07455429497568881, 0.10025468858532068, 0.12595508219495252, 0.151655475804584

import numpy as np
import matplotlib.pyplot as plt

# Define the dataset
data = {
    'Age': [25, 35, 45],
    'Income': [50000, 60000, 70000],
    'Height': [170, 180, 160]
}

# Function to perform Z-score normalization
def z_score_normalize(feature):
    mean = np.mean(feature)
    std_dev = np.std(feature)
    return [(x - mean) / std_dev for x in feature]

# Normalize each feature
normalized_data = {key: z_score_normalize(value) for key, value in data.items()}

# Plotting the data points before and after normalization
plt.figure(figsize=(12, 6))

# Original data
plt.subplot(1, 2, 1)
for feature in data:
    plt.plot(data[feature], 'o-', label=feature)
plt.title('Original Data')
plt.xlabel('Index')
plt.ylabel('Value')
plt.legend()

# Normalized data
plt.subplot(1, 2, 2)
for feature in normalized_data:
    plt.plot(normalized_data[feature], 'o-', label=feature)
plt.title('Z-score Normalized Data')
plt.xlabel('Index')
plt.ylabel('Normalized Value')
plt.legend()

plt.tight_layout()
plt.show()

import numpy as np
import matplotlib.pyplot as plt

# Define the dataset
data = [123, 23, 345, 456, 567, 678, 789, 867, 456, 3231, 4342, 456]

# Calculate the mean and standard deviation
mean = np.mean(data)
std_dev = np.std(data)

# Apply Z-score normalization
z_scores = [(x - mean) / std_dev for x in data]

# Plotting the data points before and after normalization
plt.figure(figsize=(12, 6))

# Original data
plt.subplot(1, 2, 1)
plt.plot(data, 'bo-', label='Original Data')
plt.title('Original Data')
plt.xlabel('Index')
plt.ylabel('Value')
plt.legend()

# Z-score normalized data
plt.subplot(1, 2, 2)
plt.plot(z_scores, 'ro-', label='Z-score Normalized Data')
plt.title('Z-score Normalized Data')
plt.xlabel('Index')
plt.ylabel('Z-score')
plt.legend()

plt.tight_layout()
plt.show()

import numpy as np
import matplotlib.pyplot as plt
l = np.array([10, 20, 30, 408, 501, 606, 705, 801, 90, 1])
def min_max_normalize(data):
    min_val = np.min(data)
    max_val = np.max(data)
    return (data - min_val) / (max_val - min_val)
normalized_values = min_max_normalize(l)
plt.figure(figsize=(5,3))
plt.subplot(1, 2, 1)
plt.scatter(range(len(l)), l, color='blue', alpha=0.7)
plt.title('Original Data')
plt.xlabel('Index')
plt.ylabel('Value')
plt.subplot(1, 2, 2)
plt.scatter(range(len(normalized_values)), normalized_values, color='green', alpha=0.7)
plt.title('Min-Max Normalized Data')
plt.xlabel('Index')
plt.ylabel('Normalized Value')

plt.tight_layout()
plt.show()
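The same two transforms are also available ready-made in scikit-learn; a brief equivalent sketch using MinMaxScaler and StandardScaler on the array above:

import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler

l = np.array([10, 20, 30, 408, 501, 606, 705, 801, 90, 1]).reshape(-1, 1)  # column vector
print(MinMaxScaler().fit_transform(l).ravel())    # min-max scaled to [0, 1]
print(StandardScaler().fit_transform(l).ravel())  # z-score scaled (mean 0, std 1)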

WEEK-6

# plotting
import numpy as np
import matplotlib.pyplot as plt
data = [10, 20, 30, 408, 501, 606, 705, 801, 90, 1]
data_array = np.array(data)
mean = np.mean(data_array)
std_dev = np.std(data_array)
z_scores = (data_array - mean) / std_dev
plt.figure(figsize=(5,4))

# Original Data Scatter Plot
plt.subplot(1, 2, 1)
plt.scatter(range(len(data)), data, color='red', alpha=0.5)
plt.title('Original Data')
plt.xlabel('Index')
plt.ylabel('Value')

# Z-score Normalized Data Scatter Plot
plt.subplot(1, 2, 2)
plt.scatter(range(len(z_scores)), z_scores, color='cyan', alpha=0.7)
plt.title('Z-score Normalized Data')
plt.xlabel('Index')
plt.ylabel('Z-score')

plt.tight_layout()
plt.show()

import matplotlib.pyplot as pt
pt.plot([1,2,3,4],[2,3,4,5]); pt.show()
pt.bar([1,2,3,4],[2,3,4,5]); pt.show()
pt.hist([1,2,3,4],[2,3,4,5]); pt.show()
pt.scatter([1,2,3,4],[2,3,4,5]); pt.show()


2. Visualizing the plots for the data distribution of the Toyota.csv cars dataset.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv("/content/Toyota.csv")
data.head()

   Unnamed: 0  Price   Age     KM FuelType  HP  MetColor  Automatic    CC  Doors  Weight
0           0  13500  23.0  46986   Diesel  90       1.0          0  2000  three    1165
1           1  13750  23.0  72937   Diesel  90       1.0          0  2000      3    1165
2           2  13950  24.0  41711   Diesel  90       NaN          0  2000      3    1165
3           3  14950  26.0  48000   Diesel  90       0.0          0  2000      3    1165
4           4  13750  30.0  38500   Diesel  90       0.0          0  2000      3    1170

s = data.select_dtypes(include='number').corr()
plt.figure(figsize=(5,5))
sns.heatmap(s, annot=True)

<Axes: >

WEEK-7

1. Build a regression model to predict employee Salary using the Salary_Data data set.

# data load
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
df = pd.read_csv('/content/salary.csv')
print(df)


    YearsExperience  Salary
0               1.1   39343
1               1.3   46205
2               1.5   37731
3               2.0   43525
4               2.2   39891
5               2.9   56642
6               3.0   60150
7               3.2   54445
8               3.2   64445
9               3.7   57189
10              3.9   63218
11              4.0   55794
12              4.0   56957
13              4.1   57081
14              4.5   61111
15              4.9   67938
16              5.1   66029
17              5.3   83088
18              5.9   81363
19              6.0   93940
20              6.8   91738
21              7.1   98273
22              7.9  101302
23              8.2  113812
24              8.7  109431
25              9.0  105582
26              9.5  116969
27              9.6  112635
28             10.3  122391
29             10.5  121872

plt.figure(figsize=(3,3))
plt.scatter(df['YearsExperience'], df['Salary'])
plt.xlabel('YearsExperience')
plt.ylabel('Salary')
plt.title('Salary vs Years of Experience')
plt.show()
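The next cell splits X and y_binary and later thresholds the predictions, but X, y_binary and threshold are not defined in any of the cells captured above. A minimal sketch of the assumed setup (the 60000 cutoff is a hypothetical value, not taken from the lab sheet):

X = df[['YearsExperience']]                         # feature matrix (2-D, as scikit-learn expects)
threshold = 60000                                   # hypothetical salary cutoff
y_binary = (df['Salary'] > threshold).astype(int)   # 1 = above cutoff, 0 = otherwise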

X_train, X_test, y_train, y_test = train_test_split(X, y_binary, test_size=0.2, random_state=0)

# Train the linear regression model
model = LinearRegression()
model.fit(X_train, y_train)

LinearRegression()

# Make predictions
y_pred = model.predict(X_test)
# Convert the continuous predictions into binary categories
y_pred = np.where(y_pred > threshold, 1, 0)

plt.scatter(X_train, y_train, color='blue', label='Training Data')
plt.plot(X_train, model.predict(X_train), color='red', label='Regression Line')
plt.title('Salary vs Experience (Training set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.legend()
plt.show()

Logistic regression.

import pandas as pd
data = pd.read_csv('/content/weatherAUS.csv')
print(data.head())
print(data.info())
print(data.describe())
print(data.shape)

         Date Location  MinTemp  MaxTemp  Rainfall  Evaporation  Sunshine  \
0  2008-12-01   Albury     13.4     22.9       0.6          NaN       NaN
1  2008-12-02   Albury      7.4     25.1       0.0          NaN       NaN
2  2008-12-03   Albury     12.9     25.7       0.0          NaN       NaN
3  2008-12-04   Albury      9.2     28.0       0.0          NaN       NaN
4  2008-12-05   Albury     17.5     32.3       1.0          NaN       NaN

  WindGustDir  WindGustSpeed WindDir9am  ...  Humidity3pm  Pressure9am  \
0           W           44.0          W  ...         22.0       1007.7
1         WNW           44.0        NNW  ...         25.0       1010.6
2         WSW           46.0          W  ...         30.0       1007.6
3          NE           24.0         SE  ...         16.0       1017.6
4           W           41.0        ENE  ...         33.0       1010.8

   Pressure3pm  Cloud9am  Cloud3pm  Temp9am  Temp3pm RainToday  RISK_MM  \
0       1007.1       8.0       NaN     16.9     21.8        No      0.0
1       1007.8       NaN       NaN     17.2     24.3        No      0.0
2       1008.7       NaN       2.0     21.0     23.2        No      0.0
3       1012.8       NaN       NaN     18.1     26.5        No      1.0
4       1006.0       7.0       8.0     17.8     29.7        No      0.2

  RainTomorrow
0           No
1           No
2           No
3           No
4           No

[5 rows x 24 columns]
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 142193 entries, 0 to 142192
Data columns (total 24 columns):
 #   Column         Non-Null Count   Dtype
---  ------         --------------   -----
 0   Date           142193 non-null  object
 1   Location       142193 non-null  object
 2   MinTemp        141556 non-null  float64
 3   MaxTemp        141871 non-null  float64
 4   Rainfall       140787 non-null  float64
 5   Evaporation    81350 non-null   float64
 6   Sunshine       74377 non-null   float64
 7   WindGustDir    132863 non-null  object
 8   WindGustSpeed  132923 non-null  float64
 9   WindDir9am     132180 non-null  object
 10  WindDir3pm     138415 non-null  object
 11  WindSpeed9am   140845 non-null  float64
 12  WindSpeed3pm   139563 non-null  float64
 13  Humidity9am    140419 non-null  float64
 14  Humidity3pm    138583 non-null  float64
 15  Pressure9am    128179 non-null  float64
 16  Pressure3pm    128212 non-null  float64


 17  Cloud9am       88536 non-null   float64
 18  Cloud3pm       85099 non-null   float64
 19  Temp9am        141289 non-null  float64
 20  Temp3pm        139467 non-null  float64
 21  RainToday      140787 non-null  object
 22  RISK_MM        142193 non-null  float64
 23  RainTomorrow   142193 non-null  object

# Handle missing values (replace with median for numerical columns)
numerical_cols = data.select_dtypes(include=['number']).columns
data[numerical_cols] = data[numerical_cols].fillna(data[numerical_cols].median())

# Select relevant features and target variable
features = ['MinTemp', 'MaxTemp', 'Rainfall', 'Evaporation', 'Sunshine', 'WindGustSpeed', 'WindSpeed9am', 'WindSpeed3pm', 'Humidity9am',
target = 'RainTomorrow'

X = data[features]
y = data[target].map({'Yes': 1, 'No': 0})  # Convert target to binary

# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Create and fit the logistic regression model
model = LogisticRegression()
model.fit(X_train, y_train)
# Make predictions on the test set
y_pred = model.predict(X_test)
# Evaluate the model
accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
class_report = classification_report(y_test, y_pred)
print("Accuracy:", accuracy)
print("\nConfusion Matrix:\n", conf_matrix)
print("\nClassification Report:\n", class_report)

Accuracy: 0.836105348289321

Confusion Matrix:
 [[20792  1306]
 [ 3355  2986]]

Classification Report:
               precision    recall  f1-score   support

           0       0.86      0.94      0.90     22098
           1       0.70      0.47      0.56      6341

    accuracy                           0.84     28439
   macro avg       0.78      0.71      0.73     28439
weighted avg       0.82      0.84      0.82     28439

/usr/local/lib/python3.10/dist-packages/sklearn/linear_model/_logistic.py:460: ConvergenceWarning: lbfgs failed to converge (status=
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.

Increase the number of iterations (max_iter) or scale the data as shown in:
    https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
  n_iter_i = _check_optimize_result(
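The ConvergenceWarning above suggests either scaling the features or raising max_iter; a sketch of both remedies in a single pipeline (the max_iter value is illustrative):

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

model = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
model.fit(X_train, y_train)
print("Accuracy:", model.score(X_test, y_test))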

WEEK-8

Implementation of Machine Learning algorithms - Classification Algorithms (using Pandas and Sci-kit Learn) on the Iris dataset.

1. Build a KNN classifier for classifying the flower species of the iris data set.

2. Implementation of an SVM classifier for digit classification on the Digits dataset.

from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_digits
from sklearn.svm import SVC
import pandas as pd
import numpy as np
from sklearn.impute import KNNImputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
import seaborn as sns

# KNN classifier for Iris dataset
df = pd.read_csv('/content/Iris (3).csv')
imputer = KNNImputer()
for i in df.select_dtypes(include=np.number).columns:
    df[i] = imputer.fit_transform(df[[i]])

scaler = MinMaxScaler()
df[df.select_dtypes(include=np.number).columns] = scaler.fit_transform(df[df.select_dtypes(include=np.number).columns])

X = df.drop('Species', axis=1)
y = df['Species']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
knn = KNeighborsClassifier(n_neighbors=3)  # You can adjust n_neighbors
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print("KNN Classifier for Iris Dataset:")
print("Accuracy:", accuracy_score(y_test, y_pred))
print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))
print("Classification Report:\n", classification_report(y_test, y_pred))
sns.heatmap(confusion_matrix(y_test, y_pred), annot=True)

KNN Classifier for Iris Dataset:
Accuracy: 1.0
Confusion Matrix:
 [[11  0  0]
 [ 0 13  0]
 [ 0  0  6]]
Classification Report:
                  precision    recall  f1-score   support

    Iris-setosa       1.00      1.00      1.00        11
Iris-versicolor       1.00      1.00      1.00        13
 Iris-virginica       1.00      1.00      1.00         6

       accuracy                           1.00        30
      macro avg       1.00      1.00      1.00        30
   weighted avg       1.00      1.00      1.00        30

<Axes: >

digits = load_digits()
X = digits.data
y = digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
svm = SVC()
svm.fit(X_train, y_train)
y_pred = svm.predict(X_test)
print("\nSVM Classifier for Digits Dataset:")
print("Accuracy:", accuracy_score(y_test, y_pred))
print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))
print("Classification Report:\n", classification_report(y_test, y_pred))
sns.heatmap(confusion_matrix(y_test, y_pred), annot=True)


SVM Classifier for Digits Dataset:
Accuracy: 0.9916666666666667
Confusion Matrix:
 [[27  0  0  0  0  0  0  0  0  0]
 [ 0 35  0  0  0  0  0  0  0  0]
 [ 0  0 36  0  0  0  0  0  0  0]
 [ 0  0  0 29  0  0  0  0  0  0]
 [ 0  0  0  0 30  0  0  0  0  0]
 [ 0  0  0  0  0 39  0  0  0  1]
 [ 0  0  0  0  0  0 44  0  0  0]
 [ 0  0  0  0  0  0  0 39  0  0]
 [ 0  1  0  0  0  0  0  0 38  0]
 [ 0  0  0  0  0  1  0  0  0 40]]
Classification Report:
               precision    recall  f1-score   support

           0       1.00      1.00      1.00        27
           1       0.97      1.00      0.99        35
           2       1.00      1.00      1.00        36
           3       1.00      1.00      1.00        29
           4       1.00      1.00      1.00        30
           5       0.97      0.97      0.97        40
           6       1.00      1.00      1.00        44
           7       1.00      1.00      1.00        39
           8       1.00      0.97      0.99        39
           9       0.98      0.98      0.98        41

    accuracy                           0.99       360
   macro avg       0.99      0.99      0.99       360
weighted avg       0.99      0.99      0.99       360

<Axes: >

Classification metrics

from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_digits
from sklearn.svm import SVC
import pandas as pd
import numpy as np
from sklearn.impute import KNNImputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, roc_curve
import matplotlib.pyplot as plt

dr = pd.read_csv('/content/moonDataset.csv')
X = dr[['X1','X2','X3']]
y = dr['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
svm = SVC(kernel='rbf')
svm.fit(X_train, y_train)
y_pred = svm.predict(X_test)

# Computing the confusion matrix manually
TP = np.sum((y_test == 1) & (y_pred == 1))
FP = np.sum((y_test == 0) & (y_pred == 1))
FN = np.sum((y_test == 1) & (y_pred == 0))
TN = np.sum((y_test == 0) & (y_pred == 0))
precision = TP / (TP + FP) if (TP + FP) > 0 else 0
print("Precision:", precision)
recall = TP / (TP + FN) if (TP + FN) > 0 else 0
print("Recall:", recall)
F1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0
print("F1 Score:", F1_score)
print("Classification Report:\n", classification_report(y_test, y_pred))

auc = roc_auc_score(y_test, y_pred)
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
print("AUC:", auc)
print(f"AUC: {auc:.4f}")
plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, color='purple', label=f'AUC = {auc:.4f}')
plt.plot([0, 1], [0, 1], color='orange', linestyle='--')  # Diagonal line for a random model
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend(loc="lower right")
plt.grid(True)
plt.show()

Precision: 1.0
Recall: 1.0
F1 Score: 1.0
Classification Report:
               precision    recall  f1-score   support

           0       1.00      1.00      1.00        21
           1       1.00      1.00      1.00        19

    accuracy                           1.00        40
   macro avg       1.00      1.00      1.00        40
weighted avg       1.00      1.00      1.00        40

AUC: 1.0
AUC: 1.0000
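The same manually computed counts also give accuracy directly; for example:

accuracy = (TP + TN) / (TP + TN + FP + FN)   # fraction of correct predictions
print("Accuracy:", accuracy)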

WEEK-9

import cv2
from google.colab.patches import cv2_imshow

# Read the image using cv2.imread
image = cv2.imread('/content/image.jpg')
# Display the image using cv2_imshow
cv2_imshow(image)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2_imshow(gray_image)
rotated_image_90 = cv2.transpose(image)
rotated_image_90 = cv2.flip(rotated_image_90, flipCode=1)
cv2_imshow(rotated_image_90)
rotated_image_180 = cv2.flip(image, flipCode=-1)
cv2_imshow(rotated_image_180)
x_start, y_start = 50, 50   # Top-left corner of the cropping rectangle
x_end, y_end = 200, 200     # Bottom-right corner of the cropping rectangle
# Crop the image
cropped_image = image[y_start:y_end, x_start:x_end]
cv2_imshow(cropped_image)
resized_image = cv2.resize(image, (500, 500))
cv2_imshow(resized_image)
(height, width, channels) = resized_image.shape
# Display the dimensions
print(f"Width: {width}")
print(f"Height: {height}")
print(f"Channels: {channels}")

Width: 500
Height: 500

New section

im1 = cv2.imread('/content/AIT.jpeg')
im2 = cv2.imread('/content/AIT2.jpeg')
image1 = cv2.resize(im1, (200, 200))
image2 = cv2.resize(im2, (200, 200))
# Ensure the images are the same size
if image1.shape != image2.shape:
    print("Images must be of the same size for subtraction.")
else:
    subtracted_image = cv2.absdiff(image1, image2)
    # Display the original images and the result
    cv2_imshow(image1)            # Display the first image
    cv2_imshow(image2)            # Display the second image
    cv2_imshow(subtracted_image)  # Display the subtracted image
    added_image = cv2.add(image1, image2)
    cv2_imshow(added_image)
    multiplied_image = cv2.multiply(image1, image2)
    cv2_imshow(multiplied_image)
    or_image = cv2.bitwise_or(image1, image2)
    cv2_imshow(or_image)
    and_image = cv2.bitwise_and(image1, image2)
    cv2_imshow(and_image)

WEEK 10

1. Program for extracting image properties using OpenCV.

import cv2
image = cv2.imread('/content/AIT2.jpeg')
if image is None:
    print("Error: Could not load the image.")
else:
    height, width = image.shape[:2]
    channels = image.shape[2] if len(image.shape) == 3 else 1  # Check for color channels
    image_type = image.dtype
    print("Image Height:", height)
    print("Image Width:", width)
    print("Number of Channels:", channels)
    print("Image Data Type:", image_type)

Image Height: 192
Image Width: 204
Number of Channels: 3
Image Data Type: uint8

2. Program for accessing and modifying pixel values.

import cv2
from google.colab.patches import cv2_imshow
image = cv2.imread('/content/AIT2.jpeg')
if image is None:
    print("Error: Could not load the image.")
else:
    x = 100
    y = 50
    pixel_value = image[y, x]
    print("Pixel value at (", x, ",", y, "):", pixel_value)
    new_pixel_value = [255, 0, 0]
    image[y, x] = new_pixel_value
    modified_pixel_value = image[y, x]
    print("Modified pixel value at (", x, ",", y, "):", modified_pixel_value)
    cv2_imshow(image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

Pixel value at ( 100 , 50 ): [115 126 170]
Modified pixel value at ( 100 , 50 ): [255 0 0]

3. Implementing various image smoothening techniques.

import cv2
from google.colab.patches import cv2_imshow
import numpy as np
image = cv2.imread('/content/AIT2.jpeg')
blurred_average = cv2.blur(image, (5, 5))                      # Kernel size (5x5)
resized_average = cv2.resize(blurred_average, (100, 100))      # Resize to 100x100
cv2_imshow(resized_average)
blurred_gaussian = cv2.GaussianBlur(image, (5, 5), 0)
resized_gaussian = cv2.resize(blurred_gaussian, (100, 100))    # Resize to 100x100
cv2_imshow(resized_gaussian)
blurred_median = cv2.medianBlur(image, 5)                      # Kernel size 5
resized_median = cv2.resize(blurred_median, (100, 100))        # Resize to 100x100
cv2_imshow(resized_median)
blurred_bilateral = cv2.bilateralFilter(image, 9, 75, 75)      # Adjust parameters for desired effect
resized_bilateral = cv2.resize(blurred_bilateral, (100, 100))  # Resize to 100x100
cv2_imshow(resized_bilateral)

4. Program for color mixing in OpenCV.

import cv2
import numpy as np
from google.colab.patches import cv2_imshow
image1 = cv2.imread('/content/AIT2.jpeg')
image2 = cv2.imread('/content/AIT.jpeg')
if image1.shape != image2.shape:
    image2 = cv2.resize(image2, (image1.shape[1], image1.shape[0]))
alpha = 0.5
beta = 1.0 - alpha
mixed_image = cv2.addWeighted(image1, alpha, image2, beta, 0.0)
cv2_imshow(mixed_image)
cv2.waitKey(0)
cv2.destroyAllWindows()

1. Implement the SIFT algorithm (Scale Invariant Feature Transform) using OpenCV.

import cv2
from google.colab.patches import cv2_imshow
image = cv2.imread('/content/AIT.jpeg')
sift = cv2.SIFT_create()
keypoints, descriptors = sift.detectAndCompute(image, None)
image_with_keypoints = cv2.drawKeypoints(image, keypoints, None)
cv2_imshow(image_with_keypoints)
cv2.waitKey(0)
cv2.destroyAllWindows()

2. Implement SURF using OpenCV.

import cv2
import numpy as np
from google.colab.patches import cv2_imshow
image = cv2.imread('/content/AIT.jpeg')
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
max_corners = 100     # Maximum number of corners to detect
quality_level = 0.01  # Minimum quality level (0 to 1)
min_distance = 10     # Minimum distance between detected corners
corners = cv2.goodFeaturesToTrack(gray_image, max_corners, quality_level, min_distance)
corners = np.int0(corners)
for corner in corners:
    x, y = corner.ravel()
    cv2.circle(image, (x, y), 3, (0, 255, 0), -1)
cv2_imshow(image)

<ipython-input-7-04b2081f0066>:10: DeprecationWarning: `np.int0` is a deprecated alias for `np.intp`. (Deprecated NumPy 1.24)
corners = np.int0(corners)
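Note that the cell above actually runs Shi-Tomasi corner detection (cv2.goodFeaturesToTrack) rather than SURF. SURF itself lives in the opencv-contrib xfeatures2d module and is patented, so it is disabled in default OpenCV builds; a sketch of what the SURF version would look like on a contrib build with non-free algorithms enabled (hessianThreshold value is illustrative):

import cv2
from google.colab.patches import cv2_imshow

image = cv2.imread('/content/AIT.jpeg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
surf = cv2.xfeatures2d.SURF_create(hessianThreshold=400)  # requires opencv-contrib with non-free enabled
keypoints, descriptors = surf.detectAndCompute(gray, None)
cv2_imshow(cv2.drawKeypoints(image, keypoints, None, (0, 255, 0), 4))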
Implement BF matcher algorithm

import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
img1 = cv.imread('/content/AIT.jpeg', cv.IMREAD_GRAYSCALE)   # queryImage
img2 = cv.imread('/content/AIT2.jpeg', cv.IMREAD_GRAYSCALE)  # trainImage
sift = cv.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
bf = cv.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append([m])
img3 = cv.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
plt.figure(figsize=(2,2))
plt.imshow(img3), plt.show()

<matplotlib.image.AxesImage at 0x7d82013182e0>

4. Implement Harris corner detection algorithm.

import matplotlib.pyplot as plt
import numpy as np
import cv2
image = cv2.imread('/content/AIT.jpeg')
image_copy = np.copy(image)
image_copy = cv2.cvtColor(image_copy, cv2.COLOR_BGR2RGB)
plt.figure(figsize=(2, 2))
plt.imshow(image_copy)
plt.axis('off')  # Hide axes
plt.show()
gray = cv2.cvtColor(image_copy, cv2.COLOR_RGB2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray, 2, 3, 0.04)
dst = cv2.dilate(dst, None)
plt.figure(figsize=(2,2))
plt.imshow(dst, cmap='gray')
thresh = 0.1 * dst.max()
corner_image = np.copy(image_copy)
for j in range(0, dst.shape[0]):
    for i in range(0, dst.shape[1]):
        if dst[j, i] > thresh:
            cv2.circle(corner_image, (i, j), 1, (0, 255, 0), 1)
plt.figure(figsize=(2,2))
plt.imshow(corner_image)
