prime_numbers = get_prime_numbers(list1)
print(prime_numbers)
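get_prime_numbers is not defined in the surviving text; a minimal sketch of a definition consistent with the call above (the name list1 and the sample input are assumptions):

# Hypothetical reconstruction: filters a list down to its prime members.
def get_prime_numbers(numbers):
    def is_prime(n):
        if n < 2:
            return False
        for d in range(2, int(n ** 0.5) + 1):
            if n % d == 0:
                return False
        return True
    return [n for n in numbers if is_prime(n)]

list1 = [2, 3, 4, 5, 10, 11]   # example input (assumed)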
1. Create a list of five elements. Pass the list to a function and compute the average of the five numbers.

def average(numbers):
    return sum(numbers) / len(numbers)

numbers = [10, 20, 30, 40, 50]
avg = average(numbers)
print(f"The average of the numbers is: {avg}")

The average of the numbers is: 30.0

2. Write a program that prompts the user to enter the elements of a list and adds each element to the list. Write functions maximum(Lst) and minimum(Lst) to find the maximum and minimum numbers in the list.

n = int(input("Enter no.of Elements :- "))
listt = []
for i in range(n):
    i = int(input("Enter the number :-"))
    listt.append(i)

def maximum(l):
    max = l[0]  # start from the first element so negative inputs are handled correctly
    for i in range(len(l)):
        if max < l[i]:
            max = l[i]
    print("Maximum of given list :- ", max)

def minimum(l):
    min = l[0]
    for i in range(len(l)):
        if min > l[i]:
            min = l[i]
    print("Minimum of given list :- ", min)

maximum(listt)
minimum(listt)

WEEK-2

1. BFS

from collections import deque

def bfs(graph, start):
    visited = set()
    queue = deque([start])
    visited.add(start)
    while queue:
        vertex = queue.popleft()
        print(vertex, end=' ')
        for neighbor in graph[vertex]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append(neighbor)

if __name__ == "__main__":
    graph = {
        'A': ['B', 'C'],
        'B': ['A', 'D', 'E'],
        'C': ['A', 'F'],
        'D': ['B'],
        'E': ['B', 'F'],
        'F': ['C', 'E']
    }
    print("BFS starting from vertex 'D':")
    bfs(graph, 'D')

BFS starting from vertex 'D':
D B A E C F
2. DFS

def get_children(node):
    return graph.get(node, [])
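The rest of the DFS program is cut off in this copy; below is a minimal iterative sketch built on get_children, assuming the same adjacency-list graph as in the BFS example (the start vertex 'A' is an assumption):

# Sketch only: iterative DFS with an explicit stack, using the get_children
# helper above and the graph dictionary from the BFS example.
def dfs(start):
    visited = set()
    stack = [start]
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.add(vertex)
            print(vertex, end=' ')
            # Push children in reverse so they are expanded in listed order
            for child in reversed(get_children(vertex)):
                if child not in visited:
                    stack.append(child)

dfs('A')   # assumed start vertex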
WEEK-3
1. Write a program to perform NumPy operations in Python: type, length, ndim, shape, reshape, arange, itemsize, dtype.

import numpy as np

x = np.array([1, 2, 3, 4, 5, 6])
print(f"Type: {type(x)}")
print(f"Length: {len(x)}")
print(f"No of dimensions in array : {x.ndim}")
print(f"Shape: {x.shape}")
rs = x.reshape((2, 3))
print(f"Shape of reshaped array : {rs.shape}")
print(f"Reshaped array : {rs}")
print(f"Original array : {x}")
h = np.array([[1, 2, 3], [4, 5, 6]])
print(h)
print(h.ndim)

Type: <class 'numpy.ndarray'>
Length: 6
No of dimensions in array : 1
Shape: (6,)
Shape of reshaped array : (2, 3)
Reshaped array : [[1 2 3]
 [4 5 6]]
Original array : [1 2 3 4 5 6]
[[1 2 3]
 [4 5 6]]
2
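The task also lists arange, itemsize, and dtype, which the surviving cell does not exercise; a minimal sketch covering them:

# Sketch for the remaining operations named in the task: arange, itemsize, dtype.
import numpy as np

a = np.arange(1, 7)                  # array([1, 2, 3, 4, 5, 6])
print(f"arange result : {a}")
print(f"Item size (bytes per element) : {a.itemsize}")
print(f"dtype : {a.dtype}")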
2. Write a program in Python to perform NumPy arithmetic operations: add, subtract, multiply, divide, dot, sin, cos, tan, sum, transpose, power.
import numpy as np

a = np.array([90, 30, 8, 9, 10])
b = np.array([1, 2, 3, 4, 5])
print(a + b)
print(a - b)
print(a * b)
print(a / b)
print(np.dot(a, b))
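The cell stops at dot; a sketch of the remaining operations the task names (sin, cos, tan, sum, transpose, power), reusing a and b from above:

# Sketch for the remaining arithmetic operations, reusing the arrays a and b.
print(np.sin(a))
print(np.cos(a))
print(np.tan(a))
print(np.sum(a))                     # 147
m = np.array([[1, 2], [3, 4]])
print(np.transpose(m))               # [[1 3] [2 4]]
print(np.power(b, 2))                # [ 1  4  9 16 25]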
WEEK-4

1. Performing exploratory data analysis on the iris.csv data set.
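The loading cell did not survive extraction; a minimal version consistent with the outputs below (the /content/iris.csv path is an assumption, matching the paths used in later weeks):

# Assumed loading step; the path mirrors the /content/ paths used elsewhere.
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

data = pd.read_csv('/content/iris.csv')
print(data)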
      Id  SepalLengthCm  SepalWidthCm  PetalLengthCm  PetalWidthCm
..   ...            ...           ...            ...           ...
147  148            6.5           3.0            5.2           2.0
148  149            6.2           3.4            5.4           2.3
149  150            5.9           3.0            5.1           1.8

            Species
0       Iris-setosa
1       Iris-setosa
2       Iris-setosa
3       Iris-setosa
4       Iris-setosa
..              ...
145  Iris-virginica
146  Iris-virginica
147  Iris-virginica
148  Iris-virginica
149  Iris-virginica

[150 rows x 6 columns]

plt.figure(figsize=(3, 3))
sns.scatterplot(x="SepalLengthCm", y="SepalWidthCm", data=data)
plt.show()

data.nunique()

Id               150
SepalLengthCm     35
SepalWidthCm      23
PetalLengthCm     43
PetalWidthCm      22
Species            3
dtype: int64

numerical_df = data.drop(['Species'], axis=1)
sns.heatmap(numerical_df.corr(), annot=True)
plt.show()

sns.pairplot(data, hue='Species')
plt.show()

plt.figure(figsize=(2, 2))
plt.stem(data['SepalLengthCm'])
plt.show()

plt.figure(figsize=(2, 2))
sns.histplot(data['SepalLengthCm'])
plt.show()

data.columns.tolist()

['Id',
 'SepalLengthCm',
 'SepalWidthCm',
 'PetalLengthCm',
 'PetalWidthCm',
 'Species']

plt.figure(figsize=(2, 2))
sns.jointplot(x='SepalLengthCm', y='SepalWidthCm', data=data)
plt.show()
WEEK-5

Implementing pre-processing techniques on data (using Pandas, NumPy). Performing exploratory data analysis on the Toyota cars data set:
print(data.head())

4  Afghanistan  2011  Developing  59.2  275.0  71  0.01  7.097109  68.0  3013  ...  68.0  7.87

   Income composition of resources  Schooling
0                            0.479       10.1
1                            0.476       10.0
2                            0.470        9.9
3                            0.463        9.8
4                            0.454        9.5

[5 rows x 22 columns]

data.isnull().sum()

Country                   0
Year                      0
Status                    0
Life expectancy           0
Adult Mortality           0
infant deaths             0
Alcohol                   0
percentage expenditure    0
Hepatitis B               0
Measles                   0
s = data.select_dtypes(include='number').corr()
plt.figure(figsize=(10, 10))
sns.heatmap(s, annot=True)

for i in data.select_dtypes(include='number').columns:
    sns.scatterplot(x=i, y='Life expectancy ', data=data)   # note: this column name has a trailing space in the dataset
    plt.show()
# Example usage: equal-frequency binning of a 12-element list
l = [12, 23, 34, 45, 56, 67, 78, 89, 90, 100, 99, 88]
minm = min(l)
maxm = max(l)
x = (maxm - minm) // len(l)          # integer-valued width estimate
bins = max(len(l), x) - min(len(l), x)
print('no of bins :', bins)
y = int(len(l) / bins)               # elements per bin
print('no of elements in each bin :', y)
n = bins
m = y
print_elements_up_to_m_times(l, n, m)
no of bins : 5
no of elements in each bin : 2
Iteration 1 - First 5 elements: [12, 23, 34, 45, 56]
Iteration 1 - Next 5 elements: [67, 78, 89, 90, 100]
Iteration 2 - First 5 elements: [99, 88]
Iteration 2 - Next 5 elements: []
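print_elements_up_to_m_times is not defined in the surviving text; a hypothetical definition that reproduces the output above:

# Hypothetical reconstruction of the missing helper: walks the list in blocks
# of 2*n elements and prints each half, for m iterations.
def print_elements_up_to_m_times(lst, n, m):
    for it in range(1, m + 1):
        start = (it - 1) * 2 * n
        print(f"Iteration {it} - First {n} elements: {lst[start:start + n]}")
        print(f"Iteration {it} - Next {n} elements: {lst[start + n:start + 2 * n]}")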
def min_max_normalize(data):
    """
    Normalize the data using Min-Max normalization.

    Args:
        data (list of floats or ints): The data to be normalized.

    Returns:
        list of floats: The normalized data.
    """
    min_val = min(data)
    max_val = max(data)
    normalized_data = [(x - min_val) / (max_val - min_val) for x in data]
    return normalized_data
# Example usage
data = [10, 20, 30, 40, 50]
normalized_data = min_max_normalize(data)
# Example usage
data = [123, 23, 345, 456, 567, 678, 789, 867, 456, 3231, 4342, 456]
normalized_data = min_max_normalize(data)

print("Original Data: ", data)
print("Min-Max Normalized Data: ", normalized_data)

# Plotting the data points before and after normalization
plt.figure(figsize=(12, 6))

# Original data
plt.subplot(1, 2, 1)
plt.plot(data, 'bo-', label='Original Data')
plt.title('Original Data')
plt.xlabel('Index')
plt.ylabel('Value')
plt.legend()

# Normalized data
plt.subplot(1, 2, 2)
plt.plot(normalized_data, 'ro-', label='Normalized Data')
plt.title('Min-Max Normalized Data')
plt.xlabel('Index')
plt.ylabel('Normalized Value')
plt.legend()

plt.tight_layout()
plt.show()

Original Data: [123, 23, 345, 456, 567, 678, 789, 867, 456, 3231, 4342, 456]
Min-Max Normalized Data: [0.0231535077564251, 0.0, 0.07455429497568881, 0.10025468858532068, 0.12595508219495252, 0.151655475804584

# Variant for a DataFrame: plotting every feature before and after Z-score
# normalization (here data and normalized_data are assumed to be DataFrames)
plt.figure(figsize=(12, 6))

# Original data
plt.subplot(1, 2, 1)
for feature in data:
    plt.plot(data[feature], 'o-', label=feature)
plt.title('Original Data')
plt.xlabel('Index')
plt.ylabel('Value')
plt.legend()

# Normalized data
plt.subplot(1, 2, 2)
for feature in normalized_data:
    plt.plot(normalized_data[feature], 'o-', label=feature)
plt.title('Z-score Normalized Data')
plt.xlabel('Index')
plt.ylabel('Normalized Value')
plt.legend()

plt.tight_layout()
plt.show()
import numpy as np
import matplotlib.pyplot as plt
l = np.array([10, 20, 30, 408, 501, 606, 705, 801, 90, 1])
def min_max_normalize(data):
    min_val = np.min(data)
    max_val = np.max(data)
    return (data - min_val) / (max_val - min_val)
normalized_values = min_max_normalize(l)
plt.figure(figsize=(5,3))
plt.subplot(1, 2, 1)
plt.scatter(range(len(l)), l, color='blue', alpha=0.7)
plt.title('Original Data')
plt.xlabel('Index')
plt.ylabel('Value')
plt.subplot(1, 2, 2)
plt.scatter(range(len(normalized_values)), normalized_values, color='green', alpha=0.7)
plt.title('Min-Max Normalized Data')
plt.xlabel('Index')
plt.ylabel('Normalized Value')
plt.tight_layout()
plt.show()
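Another normalization technique commonly paired with min-max in this kind of exercise is decimal scaling; a minimal supplementary sketch (not from the original notebook):

# Supplementary sketch: decimal-scaling normalization, v' = v / 10^j where j is
# the smallest integer such that max(|v'|) < 1.
import numpy as np

def decimal_scaling_normalize(values):
    arr = np.asarray(values, dtype=float)
    j = len(str(int(np.max(np.abs(arr)))))   # digits in the largest magnitude
    return arr / (10 ** j)

print(decimal_scaling_normalize([10, 20, 30, 408, 501, 606, 705, 801, 90, 1]))
# max |v| = 801 -> j = 3 -> every value is divided by 1000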
WEEK-6
# Plotting Z-scores
import numpy as np
import matplotlib.pyplot as plt

data = [10, 20, 30, 408, 501, 606, 705, 801, 90, 1]
data_array = np.array(data)
mean = np.mean(data_array)
std_dev = np.std(data_array)
z_scores = (data_array - mean) / std_dev   # Z-score: (x - mean) / std
plt.figure(figsize=(5, 4))
# The rest of this cell is cut off; a minimal completion to show the result:
plt.scatter(range(len(z_scores)), z_scores)
plt.title('Z-score Normalized Data')
plt.xlabel('Index')
plt.ylabel('Z-score')
plt.show()
2. Visualizing the plots for the data distribution of the Toyota.csv cars dataset.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
data=pd.read_csv("/content/Toyota.csv")
data.head()
s = data.select_dtypes(include='number').corr()
plt.figure(figsize=(5, 5))
sns.heatmap(s, annot=True)
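Only the correlation heatmap survives here, though the task asks for distribution plots; a minimal sketch of typical distribution views (the column names 'Price' and 'Age' are assumptions about Toyota.csv):

# Sketch: distribution plots for the Toyota data. 'Price' and 'Age' are assumed
# column names; substitute whatever data.columns actually contains.
import seaborn as sns
import matplotlib.pyplot as plt

plt.figure(figsize=(4, 3))
sns.histplot(data['Price'].dropna(), kde=True)   # histogram + density of one column
plt.show()

plt.figure(figsize=(4, 3))
sns.boxplot(x=data['Age'])                       # box plot to expose outliers
plt.show()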
WEEK-7
1. Build a regression model to predict employee salary using the Salary_Data data set.
#data load
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score,classification_report
df=pd.read_csv('/content/salary.csv')
print(df)
YearsExperience Salary
0 1.1 39343
plt.figure(figsize=(6, 4))
plt.scatter(df['YearsExperience'], df['Salary'])
plt.xlabel('YearsExperience')
plt.ylabel('Salary')
plt.title('Salary vs Years of Experience')
plt.show()
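The cell that actually fits the regression is cut off in this copy; a minimal sketch of that step, assuming the df loaded from salary.csv above and the imports already present:

# Sketch: fitting the linear regression itself (this part is missing from the copy).
X = df[['YearsExperience']]
y = df['Salary']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("R^2 on test set:", model.score(X_test, y_test))
print("Coefficient:", model.coef_[0], "Intercept:", model.intercept_)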
import pandas as pd

data = pd.read_csv('/content/weatherAUS.csv')
print(data.head())
print(data.info())
print(data.describe())
print(data.shape)
Confusion Matrix:
[[20792  1306]
 [ 3355  2986]]

Classification Report:
              precision    recall  f1-score   support

Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
n_iter_i = _check_optimize_result(

digits = load_digits()

    accuracy                           1.00        40
   macro avg       1.00      1.00      1.00        40
weighted avg       1.00      1.00      1.00        40

AUC: 1.0
AUC: 1.0000

Classification metrics
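The cells that produced the confusion matrix, classification report, and AUC above are missing; a generic sketch of how such metrics are computed with scikit-learn (y_test and y_pred stand in for whichever model's predictions were used):

# Sketch: standard scikit-learn classification metrics; y_test/y_pred are
# placeholders for the actual model outputs used above.
from sklearn.metrics import confusion_matrix, classification_report, roc_auc_score

print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred))
print("Classification Report:")
print(classification_report(y_test, y_pred))
print(f"AUC: {roc_auc_score(y_test, y_pred):.4f}")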
WEEK-9
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score, roc_curve
import matplotlib.pyplot as plt

dr = pd.read_csv('/content/moonDataset.csv')
X = dr[['X1', 'X2', 'X3']]
y = dr['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

svm = SVC(kernel='rbf')
svm.fit(X_train, y_train)
y_pred = svm.predict(X_test)

# Computing the confusion matrix manually
TP = np.sum((y_test == 1) & (y_pred == 1))
FP = np.sum((y_test == 0) & (y_pred == 1))
FN = np.sum((y_test == 1) & (y_pred == 0))
TN = np.sum((y_test == 0) & (y_pred == 0))   # completing the truncated cell
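The WEEK-9 cell breaks off after the counts; metrics derived from them would look like this (a sketch, continuing the variables above):

# Sketch: deriving metrics from the manually computed counts above.
accuracy = (TP + TN) / (TP + TN + FP + FN)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
f1 = 2 * precision * recall / (precision + recall)
print(f"Accuracy: {accuracy:.4f}  Precision: {precision:.4f}  Recall: {recall:.4f}  F1: {f1:.4f}")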
WEEK-10

import cv2
from google.colab.patches import cv2_imshow

# Read the image using cv2.imread
img = cv2.imread('/content/image.jpg')
# Display the image using cv2_imshow
cv2_imshow(img)

gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2_imshow(gray_image)

rotated_image_90 = cv2.transpose(img)   # transpose; combine with cv2.flip for a true 90-degree rotation

Width: 500
Height: 500
import cv2
from google.colab.patches import cv2_imshow

image = cv2.imread('/content/AIT2.jpeg')
if image is None:
    print("Error: Could not load the image.")
else:
    x = 100
    y = 50
    pixel_value = image[y, x]
    print("Pixel value at (", x, ",", y, "):", pixel_value)
    new_pixel_value = [255, 0, 0]          # BGR: pure blue
    image[y, x] = new_pixel_value
    modified_pixel_value = image[y, x]
    print("Modified pixel value at (", x, ",", y, "):", modified_pixel_value)
    cv2_imshow(image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

Pixel value at ( 100 , 50 ): [115 126 170]
Modified pixel value at ( 100 , 50 ): [255 0 0]

4. Program for color mixing in OpenCV.

import cv2
import numpy as np
from google.colab.patches import cv2_imshow

image1 = cv2.imread('/content/AIT2.jpeg')
image2 = cv2.imread('/content/AIT.jpeg')
if image1.shape != image2.shape:
    image2 = cv2.resize(image2, (image1.shape[1], image1.shape[0]))
alpha = 0.5
beta = 1.0 - alpha
# addWeighted blends the two images: mixed = alpha*image1 + beta*image2 + 0.0
mixed_image = cv2.addWeighted(image1, alpha, image2, beta, 0.0)
cv2_imshow(mixed_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2
from google.colab.patches import cv2_imshow
import numpy as np

image = cv2.imread('/content/AIT2.jpeg')

blurred_average = cv2.blur(image, (5, 5))                      # Averaging filter, 5x5 kernel
resized_average = cv2.resize(blurred_average, (100, 100))      # Resize to 100x100 for display
cv2_imshow(resized_average)

blurred_gaussian = cv2.GaussianBlur(image, (5, 5), 0)          # Gaussian filter, 5x5 kernel
resized_gaussian = cv2.resize(blurred_gaussian, (100, 100))    # Resize to 100x100
cv2_imshow(resized_gaussian)

blurred_median = cv2.medianBlur(image, 5)                      # Median filter, kernel size 5
resized_median = cv2.resize(blurred_median, (100, 100))        # Resize to 100x100
cv2_imshow(resized_median)

blurred_bilateral = cv2.bilateralFilter(image, 9, 75, 75)      # Bilateral filter; adjust parameters for desired effect
resized_bilateral = cv2.resize(blurred_bilateral, (100, 100))  # Resize to 100x100
cv2_imshow(resized_bilateral)

import cv2
from google.colab.patches import cv2_imshow

image = cv2.imread('/content/AIT.jpeg')
sift = cv2.SIFT_create()
keypoints, descriptors = sift.detectAndCompute(image, None)
image_with_keypoints = cv2.drawKeypoints(image, keypoints, None)
cv2_imshow(image_with_keypoints)
cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2
import numpy as np
from google.colab.patches import cv2_imshow