ML Lab Report
OUTPUT
Training Data:
# Candidate Elimination: update the hypothesis boundaries for one
# training example. S is the maximally specific hypothesis (a list of
# attribute values / '?'), G the list of maximally general hypotheses,
# `inputs` the example's attribute vector, `output` its label
# ("Yes" = positive example).
if output == "Yes":
    # Positive example: generalize S just enough to cover the example.
    for i in range(len(S)):
        if S[i] != inputs[i]:
            S[i] = '?'  # Generalize S
    # Remove inconsistent hypotheses from G: drop any general hypothesis
    # that fails to cover this positive example.
    # NOTE(review): the source excerpt lost its indentation, so whether
    # this filter ran inside the positive branch is an assumption — it
    # matches the standard algorithm (G is specialized, not filtered,
    # on negative examples); confirm against the full program.
    G = [g for g in G
         if all(g[i] == '?' or g[i] == inputs[i] for i in range(len(S)))]
OUTPUT:
Final Specific Hypothesis (S): ['Sunny', 'Warm', '?', 'Strong', '?', '?']
Final General Hypotheses (G):
['Sunny', '?', '?', '?', '?', '?']
['?', 'Warm', '?', '?', '?', '?']
# Two-layer neural network learning XOR by plain batch gradient descent.
# Architecture: 2 inputs -> 2 hidden sigmoid units -> 1 sigmoid output.
# No bias terms and an effective learning rate of 1.0, as in the report.

# Activation function and its derivative.
# NOTE: sigmoid_deriv expects an already-activated value a = sigmoid(x)
# and computes a * (1 - a) — the standard trick when activations are cached.
def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def sigmoid_deriv(x):
    return x * (1 - x)


# Input (XOR truth table) and target outputs.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# Reproducible random weight initialisation.
np.random.seed(1)
w1 = np.random.rand(2, 2)  # input -> hidden weights
w2 = np.random.rand(2, 1)  # hidden -> output weights

# Training loop: forward pass, output-layer delta, backpropagate.
for _ in range(10000):
    h = sigmoid(np.dot(X, w1))      # hidden activations, shape (4, 2)
    o = sigmoid(np.dot(h, w2))      # network output, shape (4, 1)
    d = (y - o) * sigmoid_deriv(o)  # output-layer delta
    # NOTE: w2 is updated *before* the hidden-layer delta is computed,
    # so the backpropagated error uses the new w2. Kept as-is to
    # reproduce the report's printed output exactly.
    w2 += h.T.dot(d)
    w1 += X.T.dot(d.dot(w2.T) * sigmoid_deriv(h))

# Output
print("Output:")
print(o.round(3))
Output:
[[0.033]
[0.931]
[0.931]
[0.093]]
# Load dataset
# NOTE(review): hard-coded absolute Windows path — replace with a
# relative path or command-line argument for portability.
df = pd.read_csv(
    "C:/Users/ashwi/OneDrive/Documents/python programs/training_data.csv"
)

# Hold out 20% of the data for testing, with a fixed seed for
# reproducibility.
# NOTE(review): X and y are presumably derived from `df`, and `model`
# fitted on the training split, in code not shown in this excerpt —
# confirm against the full program.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Evaluate the trained model on the held-out test split.
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
print(f"Accuracy:{accuracy:.2f}")
print(f"Precision: {precision:.2f}")
print(f"Recall:{recall:.2f}")
OUTPUT:
Accuracy:0.67
Precision: 0.67
Recall:1.00
Prediction (1 = Yes, 0 = No): 1
OUTPUT
Accuracy: 0.0
Correct Predictions: []
Wrong Predictions: [([1, 2], 0,1), ([2, 3], 0,1)]