
NAME - SAHIL JAIN

REG NO - 21BCE2687
Course Name - Deep Learning Lab
LAB-4: MULTI-LAYER NN WITH GRADIENT DESCENT
Q-1)
Consider the weight, target and input as 0.0, 0.8 and 1.1 respectively. Repeat 4 iterations of the forward pass to determine the predicted output. For each forward pass, calculate and print the following:
(a) Predicted output (Y_pred) (b) Error or Delta = (Y - Y_pred) (c) Squared Error = (Y - Y_pred)^2 (d) Weight_Delta = Delta * input (e) New_Weight = old weight - Weight_Delta.
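As a worked example of the first pass (using the sign convention of the solution code below, where Delta = Y - Y_pred and the update therefore adds Weight_Delta): Y_pred = 0.0 * 1.1 = 0.0; Delta = 0.8 - 0.0 = 0.8; Squared Error = 0.8^2 = 0.64; Weight_Delta = 0.8 * 1.1 = 0.88; New Weight = 0.0 + 0.88 = 0.88.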

Ans:
# Initialize variables
weight = 0.0
target = 0.8
input_val = 1.1

# Perform 4 iterations
for i in range(4):
    # (a) Calculate predicted output
    Y_pred = weight * input_val

    # (b) Calculate Error or Delta
    Delta = target - Y_pred

    # (c) Calculate Squared Error
    Squared_Error = Delta ** 2

    # (d) Calculate Weight_Delta
    Weight_Delta = Delta * input_val

    # (e) Update the weight. Since Delta = target - Y_pred, adding
    # Weight_Delta moves the prediction toward the target; this is the
    # same step as subtracting when Delta is defined as (Y_pred - target).
    new_weight = weight + Weight_Delta

    # Print the results for each iteration
    print(f"Iteration {i+1}:")
    print(f"Predicted Output (Y_pred): {Y_pred:.4f}")
    print(f"Delta (Y - Y_pred): {Delta:.4f}")
    print(f"Squared Error: {Squared_Error:.4f}")
    print(f"Weight Delta: {Weight_Delta:.4f}")
    print(f"New Weight: {new_weight:.4f}")
    print("-" * 30)

    # Update weight for the next iteration
    weight = new_weight

Output:
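The computation is deterministic, so the four iterations should print the following values (to four decimal places):

Iteration 1:
Predicted Output (Y_pred): 0.0000
Delta (Y - Y_pred): 0.8000
Squared Error: 0.6400
Weight Delta: 0.8800
New Weight: 0.8800
------------------------------
Iteration 2:
Predicted Output (Y_pred): 0.9680
Delta (Y - Y_pred): -0.1680
Squared Error: 0.0282
Weight Delta: -0.1848
New Weight: 0.6952
------------------------------
Iteration 3:
Predicted Output (Y_pred): 0.7647
Delta (Y - Y_pred): 0.0353
Squared Error: 0.0012
Weight Delta: 0.0388
New Weight: 0.7340
------------------------------
Iteration 4:
Predicted Output (Y_pred): 0.8074
Delta (Y - Y_pred): -0.0074
Squared Error: 0.0001
Weight Delta: -0.0081
New Weight: 0.7259
------------------------------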

Q-2)
Assume that the neurons use the sigmoid activation function, and perform a forward and a backward pass on the network. Also assume that the actual output y is 0.5 and the learning rate is 1. Now perform backpropagation using the backpropagation algorithm.
Ans:
import numpy as np

class NeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        # Initialize weights with random values
        self.weights_input_hidden = np.random.randn(self.input_size, self.hidden_size)
        self.weights_hidden_output = np.random.randn(self.hidden_size, self.output_size)

        # Initialize the biases with zeros
        self.bias_hidden = np.zeros((1, self.hidden_size))
        self.bias_output = np.zeros((1, self.output_size))

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        # x is assumed to already be a sigmoid output, i.e. x = sigmoid(z)
        return x * (1 - x)

    def feedforward(self, X):
        # Input to hidden layer
        self.hidden_activation = np.dot(X, self.weights_input_hidden) + self.bias_hidden
        self.hidden_output = self.sigmoid(self.hidden_activation)

        # Hidden to output layer
        self.output_activation = np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output
        self.predicted_output = self.sigmoid(self.output_activation)

        return self.predicted_output

    def backward(self, X, y, learning_rate):
        # Compute the output layer error
        output_error = y - self.predicted_output
        output_delta = output_error * self.sigmoid_derivative(self.predicted_output)

        # Compute the hidden layer error
        hidden_error = np.dot(output_delta, self.weights_hidden_output.T)
        hidden_delta = hidden_error * self.sigmoid_derivative(self.hidden_output)

        # Update weights and biases
        self.weights_hidden_output += np.dot(self.hidden_output.T, output_delta) * learning_rate
        self.bias_output += np.sum(output_delta, axis=0, keepdims=True) * learning_rate
        self.weights_input_hidden += np.dot(X.T, hidden_delta) * learning_rate
        self.bias_hidden += np.sum(hidden_delta, axis=0, keepdims=True) * learning_rate

    def train(self, X, y, epochs, learning_rate):
        for epoch in range(epochs):
            # Perform feedforward pass
            output = self.feedforward(X)
            # Perform backpropagation
            self.backward(X, y, learning_rate)

            # Print the loss every 4000 epochs
            if epoch % 4000 == 0:
                loss = np.mean(np.square(y - output))
                print(f"Epoch {epoch}, Loss: {loss}")

    def predict(self, X):
        return self.feedforward(X)

# XOR input and output
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# Create a neural network instance
nn = NeuralNetwork(input_size=2, hidden_size=4, output_size=1)

# Train the neural network
nn.train(X, y, epochs=10000, learning_rate=0.1)

# Test the trained model
output = nn.predict(X)
print("Predictions after training:")
print(output)

Output:

Q-3)
Construct a feedback network with backpropagation for the input X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float) and output y = np.array(([92], [86], [89]), dtype=float). Construct a neural network with inputSize = 2, outputSize = 1 and hiddenSize = 3. Train the network using backpropagation and test it.
Ans:
import numpy as np

class FeedbackNeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        # Initialize weights with random values
        self.weights_input_hidden = np.random.randn(self.input_size, self.hidden_size)
        self.weights_hidden_output = np.random.randn(self.hidden_size, self.output_size)

        # Initialize biases with zeros
        self.bias_hidden = np.zeros((1, self.hidden_size))
        self.bias_output = np.zeros((1, self.output_size))

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        # x is assumed to already be a sigmoid output, i.e. x = sigmoid(z)
        return x * (1 - x)

    def feedforward(self, X):
        # Input to hidden layer
        self.hidden_activation = np.dot(X, self.weights_input_hidden) + self.bias_hidden
        self.hidden_output = self.sigmoid(self.hidden_activation)

        # Hidden to output layer
        self.output_activation = np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output
        self.predicted_output = self.sigmoid(self.output_activation)

        return self.predicted_output

    def backward(self, X, y, learning_rate):
        # Compute the output layer error
        output_error = y - self.predicted_output
        output_delta = output_error * self.sigmoid_derivative(self.predicted_output)

        # Compute the hidden layer error
        hidden_error = np.dot(output_delta, self.weights_hidden_output.T)
        hidden_delta = hidden_error * self.sigmoid_derivative(self.hidden_output)

        # Update weights and biases
        self.weights_hidden_output += np.dot(self.hidden_output.T, output_delta) * learning_rate
        self.bias_output += np.sum(output_delta, axis=0, keepdims=True) * learning_rate
        self.weights_input_hidden += np.dot(X.T, hidden_delta) * learning_rate
        self.bias_hidden += np.sum(hidden_delta, axis=0, keepdims=True) * learning_rate

    def train(self, X, y, epochs, learning_rate):
        for epoch in range(epochs):
            # Perform feedforward pass
            output = self.feedforward(X)
            # Perform backpropagation
            self.backward(X, y, learning_rate)

            # Print the loss every 1000 epochs
            if epoch % 1000 == 0:
                loss = np.mean(np.square(y - output))
                print(f"Epoch {epoch}, Loss: {loss}")

    def predict(self, X):
        return self.feedforward(X)

# Input and output data
X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
y = np.array(([92], [86], [89]), dtype=float)

# Normalize the data: scale each feature by its column maximum,
# and scale the target to [0, 1] (assuming the target is out of 100)
X = X / np.amax(X, axis=0)
y = y / 100

# Create a neural network instance
nn = FeedbackNeuralNetwork(input_size=2, hidden_size=3, output_size=1)

# Train the neural network
nn.train(X, y, epochs=10000, learning_rate=0.1)

# Test the trained model
output = nn.predict(X)
print("Predictions after training:")
print(output * 100)  # Rescale the output back to the original range

Output:
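As a quick sanity check of the normalization above: each column of X is divided by that column's maximum, so every feature lands in [0, 1]. A minimal sketch using the question's data (illustrative only):

import numpy as np

X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
print(np.amax(X, axis=0))      # [3. 9.]
print(X / np.amax(X, axis=0))
# [[0.66666667 1.        ]
#  [0.33333333 0.55555556]
#  [1.         0.66666667]]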

Q-4)
Consider a housing price data CSV file consisting of 13 input features and 1 output feature, 'price'. Construct a neural network and perform the following tasks: a. Load and pre-process the dataset b. Visualize the distribution of the price values using a frequency plot c. Split the dataset into training and testing sets d. Define a neural network using NumPy (do not use any other framework such as Keras or PyTorch) e. Train the data and build the neural network model f. Evaluate on the test set g. Calculate the mean squared error and visualize the result.
Ans:
a. Load and Preprocess the Data

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Load the dataset
data = pd.read_csv("C://Users//Hp//Downloads//housing (2).csv")

# Separate input features (X) and output feature (y)
X = data.iloc[:, :-1].values  # All columns except the last one as input features
y = data.iloc[:, -1].values   # The last column as the output feature

# Standardize the input features (mean=0, variance=1)
scaler = StandardScaler()
X = scaler.fit_transform(X)

# Reshape the output to a column vector
y = y.reshape(-1, 1)

# Scale the target to [0, 1] so it matches the sigmoid output range
# (mirrors the y/100 scaling in Q-3; without this the sigmoid output
# neuron cannot reach raw price values)
y = y / np.max(y)
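For reference, StandardScaler's fit_transform is just column-wise z-scoring; a minimal NumPy sketch of the same step, where X_raw is a hypothetical name for the unscaled feature matrix:

# Hypothetical illustration: z-score each feature column by hand.
# StandardScaler computes the same column means and stds (ddof=0).
X_manual = (X_raw - X_raw.mean(axis=0)) / X_raw.std(axis=0)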

b. Visualize the Distribution of the Prices

import matplotlib.pyplot as plt

# Plot the distribution of prices as a frequency plot
plt.figure(figsize=(10, 6))
plt.hist(y, bins=30, color='blue', edgecolor='black')
plt.title('Distribution of Housing Prices')
plt.xlabel('Price')
plt.ylabel('Frequency')
plt.show()
c. Split the Dataset into Training and Testing

# Hold out 20% of the samples for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

d. Define a Neural Network Using NumPy

class NeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        # Initialize weights and biases
        self.weights_input_hidden = np.random.randn(self.input_size, self.hidden_size)
        self.weights_hidden_output = np.random.randn(self.hidden_size, self.output_size)
        self.bias_hidden = np.zeros((1, self.hidden_size))
        self.bias_output = np.zeros((1, self.output_size))

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        # x is assumed to already be a sigmoid output, i.e. x = sigmoid(z)
        return x * (1 - x)

    def feedforward(self, X):
        # Input to hidden layer
        self.hidden_activation = np.dot(X, self.weights_input_hidden) + self.bias_hidden
        self.hidden_output = self.sigmoid(self.hidden_activation)

        # Hidden to output layer
        self.output_activation = np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output
        self.predicted_output = self.sigmoid(self.output_activation)

        return self.predicted_output

    def backward(self, X, y, learning_rate):
        # Compute the output layer error
        output_error = y - self.predicted_output
        output_delta = output_error * self.sigmoid_derivative(self.predicted_output)

        # Compute the hidden layer error
        hidden_error = np.dot(output_delta, self.weights_hidden_output.T)
        hidden_delta = hidden_error * self.sigmoid_derivative(self.hidden_output)

        # Update weights and biases
        self.weights_hidden_output += np.dot(self.hidden_output.T, output_delta) * learning_rate
        self.bias_output += np.sum(output_delta, axis=0, keepdims=True) * learning_rate
        self.weights_input_hidden += np.dot(X.T, hidden_delta) * learning_rate
        self.bias_hidden += np.sum(hidden_delta, axis=0, keepdims=True) * learning_rate

    def train(self, X, y, epochs, learning_rate):
        for epoch in range(epochs):
            # Perform feedforward pass
            output = self.feedforward(X)
            # Perform backpropagation
            self.backward(X, y, learning_rate)

            # Print the loss every 1000 epochs
            if epoch % 1000 == 0:
                loss = np.mean(np.square(y - output))
                print(f"Epoch {epoch}, Loss: {loss}")

    def predict(self, X):
        return self.feedforward(X)

e. Train the Data and Build the Neural Network Model

# Create the neural network instance
input_size = X_train.shape[1]  # Number of input features
hidden_size = 10               # Number of hidden neurons
output_size = 1                # Single output neuron for price prediction

nn = NeuralNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size)

# Train the network
nn.train(X_train, y_train, epochs=10000, learning_rate=0.01)

f. Evaluate on Test Set

# Predict the prices on the test set (predictions are in the same
# scaled units as y_train/y_test)
predictions = nn.predict(X_test)

g. Calculate the Mean Squared Error and Visualize the Result

from sklearn.metrics import mean_squared_error

# Calculate the Mean Squared Error (MSE) on the scaled targets
mse = mean_squared_error(y_test, predictions)
print(f"Mean Squared Error on Test Set: {mse}")

# Visualize the comparison between actual and predicted prices
plt.figure(figsize=(10, 6))
plt.plot(y_test, label='Actual Prices')
plt.plot(predictions, label='Predicted Prices', linestyle='--')
plt.title('Actual vs Predicted Prices')
plt.xlabel('Test Sample')
plt.ylabel('Price')
plt.legend()
plt.show()
