
DEEP LEARNING LAB MANUAL (21SCSL26)

DEEP LEARNING LAB

PROGRAM -1

Build an Artificial Neural Network by implementing the Backpropagation algorithm and test the same using appropriate data sets.

The program below initializes a small two-layer network with random weights, trains it by stochastic gradient descent on a toy two-class dataset, and prints the learned weights and deltas.

import random
from math import exp
from random import seed

# Initialize a network

def initialize_network(n_inputs, n_hidden, n_outputs):
    network = list()
    hidden_layer = [{'weights': [random.uniform(-0.5, 0.5) for i in range(n_inputs + 1)]} for i in range(n_hidden)]
    network.append(hidden_layer)
    output_layer = [{'weights': [random.uniform(-0.5, 0.5) for i in range(n_hidden + 1)]} for i in range(n_outputs)]
    network.append(output_layer)
    i = 1
    print("\n The initialised Neural Network:\n")
    for layer in network:
        j = 1
        for sub in layer:
            print("\n Layer[%d] Node[%d]:\n" % (i, j), sub)
            j = j + 1
        i = i + 1
    return network

# Calculate neuron activation (net) for an input

def activate(weights, inputs):
    activation = weights[-1]  # the bias is stored as the last weight
    for i in range(len(weights) - 1):
        activation += weights[i] * inputs[i]
    return activation

# Transfer neuron activation through the sigmoid function

def transfer(activation):
    return 1.0 / (1.0 + exp(-activation))


# Forward propagate input to a network output


def forward_propagate(network, row):
    inputs = row
    for layer in network:
        new_inputs = []
        for neuron in layer:
            activation = activate(neuron['weights'], inputs)
            neuron['output'] = transfer(activation)
            new_inputs.append(neuron['output'])
        inputs = new_inputs
    return inputs

# Calculate the derivative of a neuron output


def transfer_derivative(output):
    # Derivative of the sigmoid expressed in terms of its output: s'(x) = s(x) * (1 - s(x))
    return output * (1.0 - output)

# Backpropagate error and store in neurons


def backward_propagate_error(network, expected):
    for i in reversed(range(len(network))):
        layer = network[i]
        errors = list()
        if i != len(network) - 1:
            # Hidden layer: error is the delta-weighted sum of next-layer weights
            for j in range(len(layer)):
                error = 0.0
                for neuron in network[i + 1]:
                    error += (neuron['weights'][j] * neuron['delta'])
                errors.append(error)
        else:
            # Output layer: error is expected minus actual output
            for j in range(len(layer)):
                neuron = layer[j]
                errors.append(expected[j] - neuron['output'])
        for j in range(len(layer)):
            neuron = layer[j]
            neuron['delta'] = errors[j] * transfer_derivative(neuron['output'])

# Update network weights with error


def update_weights(network, row, l_rate):
    for i in range(len(network)):
        inputs = row[:-1]
        if i != 0:
            # Inputs to deeper layers are the outputs of the previous layer
            inputs = [neuron['output'] for neuron in network[i - 1]]
        for neuron in network[i]:
            for j in range(len(inputs)):
                neuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]
            neuron['weights'][-1] += l_rate * neuron['delta']  # bias update

# Train a network for a fixed number of epochs


def train_network(network, train, l_rate, n_epoch, n_outputs):
    print("\n Network Training Begins:\n")
    for epoch in range(n_epoch):
        sum_error = 0
        for row in train:
            outputs = forward_propagate(network, row)
            expected = [0 for i in range(n_outputs)]
            expected[row[-1]] = 1  # one-hot encode the class label
            sum_error += sum([(expected[i] - outputs[i]) ** 2 for i in range(len(expected))])
            backward_propagate_error(network, expected)
            update_weights(network, row, l_rate)
        print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, l_rate, sum_error))
    print("\n Network Training Ends:\n")

# Test the backpropagation training algorithm
seed(2)

# Each row holds two input features followed by the class label (0 or 1)
dataset = [[2.7810836, 2.550537003, 0],
           [1.465489372, 2.362125076, 0],
           [3.396561688, 4.400293529, 0],
           [1.38807019, 1.850220317, 0],
           [3.06407232, 3.005305973, 0],
           [7.627531214, 2.759262235, 1],
           [5.332441248, 2.088626775, 1],
           [6.922596716, 1.77106367, 1],
           [8.675418651, -0.242068655, 1],
           [7.673756466, 3.508563011, 1]]

print("\n The input Data Set :\n",dataset)


n_inputs = len(dataset[0]) - 1
print("\n Number of Inputs :\n",n_inputs)
n_outputs = len(set([row[-1] for row in dataset]))


print("\n Number of Outputs :\n",n_outputs)

# Network Initialization
network = initialize_network(n_inputs, 2, n_outputs)

# Training the Network
train_network(network, dataset, 0.5, 20, n_outputs)

print("\n Final Neural Network :")

i = 1
for layer in network:
    j = 1
    for sub in layer:
        print("\n Layer[%d] Node[%d]:\n" % (i, j), sub)
        j = j + 1
    i = i + 1

OUTPUT:
The input Data Set :
[[2.7810836, 2.550537003, 0], [1.465489372, 2.362125076, 0], [3.396561688, 4.400293529, 0], [1.38807019,
1.850220317, 0], [3.06407232, 3.005305973, 0], [7.627531214, 2.759262235, 1], [5.332441248, 2.088626775, 1],
[6.922596716, 1.77106367, 1], [8.675418651, -0.242068655, 1], [7.673756466, 3.508563011, 1]]

Number of Inputs :
2

Number of Outputs :
2

The initialised Neural Network:

Layer[1] Node[1]:
{'weights': [0.4560342718892494, 0.4478274870593494, -0.4434486322731913]}

Layer[1] Node[2]:
{'weights': [-0.41512800484107837, 0.33549887812944956, 0.2359699890685233]}

Layer[2] Node[1]:
{'weights': [0.1697304014402209, -0.1918635424108558, 0.10594416567846243]}

Layer[2] Node[2]:
{'weights': [0.10680173364083789, 0.08120401711200309, -0.3416171297451944]}

Network Training Begins:

>epoch=0, lrate=0.500, error=5.278
>epoch=1, lrate=0.500, error=5.122
>epoch=2, lrate=0.500, error=5.006
>epoch=3, lrate=0.500, error=4.875
>epoch=4, lrate=0.500, error=4.700
>epoch=5, lrate=0.500, error=4.466
>epoch=6, lrate=0.500, error=4.176
>epoch=7, lrate=0.500, error=3.838
>epoch=8, lrate=0.500, error=3.469
>epoch=9, lrate=0.500, error=3.089
>epoch=10, lrate=0.500, error=2.716
>epoch=11, lrate=0.500, error=2.367
>epoch=12, lrate=0.500, error=2.054
>epoch=13, lrate=0.500, error=1.780
>epoch=14, lrate=0.500, error=1.546
>epoch=15, lrate=0.500, error=1.349
>epoch=16, lrate=0.500, error=1.184
>epoch=17, lrate=0.500, error=1.045
>epoch=18, lrate=0.500, error=0.929
>epoch=19, lrate=0.500, error=0.831

Network Training Ends:

Final Neural Network :

Layer[1] Node[1]:
{'weights': [0.8642508164347664, -0.8497601716670761, -0.8668929014392035], 'output': 0.9295587965836384,
'delta': 0.005645382825629247}

Layer[1] Node[2]:
{'weights': [-1.2934302410111027, 1.7109363237151511, 0.7125327507327331], 'output': 0.04760703296164143,
'delta': -0.005928559978815065}

Layer[2] Node[1]:
{'weights': [-1.3098359335096292, 2.16462207144596, -0.3079052288835877], 'output': 0.1989556395205846,
'delta': -0.03170801648036036}

Layer[2] Node[2]:
{'weights': [1.5506793402414165, -2.11315950446121, 0.1333585709422027], 'output': 0.8095042653312078,
'delta': 0.029375796661413225}


PROGRAM -2
Build an Artificial Neural Network by implementing the Forward propagation algorithm and
test the same using appropriate data sets.

from math import exp

# Calculate neuron activation for an input (bias stored as the last weight)
def activate(weights, inputs):
    activation = weights[-1]
    for i in range(len(weights) - 1):
        activation += weights[i] * inputs[i]
    return activation

# Sigmoid transfer function
def transfer(activation):
    return 1.0 / (1.0 + exp(-activation))

# Forward propagate an input row through the network, layer by layer
def forward_propagate(network, row):
    inputs = row
    for layer in network:
        new_inputs = []
        for neuron in layer:
            activation = activate(neuron['weights'], inputs)
            neuron['output'] = transfer(activation)
            new_inputs.append(neuron['output'])
        inputs = new_inputs
    return inputs

# A fixed test network: one hidden neuron feeding two output neurons
network = [[{'weights': [0.13436424411240122, 0.8474337369372327, 0.763774618976614]}],
           [{'weights': [0.2550690257394217, 0.49543508709194095]},
            {'weights': [0.4494910647887381, 0.651592972722763]}]]
row = [1, 0, None]
output = forward_propagate(network, row)
print(output)

OUTPUT:

[0.6629970129852887, 0.7253160725279748]
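
These values can be verified by hand: the single hidden neuron outputs sigmoid(0.1344*1 + 0.8474*0 + 0.7638) = sigmoid(0.8981) ≈ 0.7106, and the two output neurons then yield sigmoid(0.2551*0.7106 + 0.4954) ≈ 0.6630 and sigmoid(0.4495*0.7106 + 0.6516) ≈ 0.7253, matching the printed result.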


PROGRAM -3

Convolutional_NN_Fashion_Dataset.py

import tensorflow as tf
from keras.datasets import fashion_mnist
from keras.layers import Conv2D, Flatten, Dense
import numpy as np
import numpy.random as nr
import matplotlib.pyplot as plt
from keras.models import Sequential
from sklearn.metrics import accuracy_score

nc = 10  # number of classes

(Xtrain, ytrain),(Xtest,ytest) = fashion_mnist.load_data()


plt.figure(1)
imgplot1 = plt.imshow(Xtrain[nr.randint(60000)])
plt.show()

plt.figure(2)
imgplot2 = plt.imshow(Xtrain[nr.randint(60000)])
plt.show()

Xtrain = Xtrain.reshape(60000, 28, 28, 1)
Xtest = Xtest.reshape(10000, 28, 28, 1)

ytrainEnc = tf.one_hot(ytrain, depth=nc)
ytestEnc = tf.one_hot(ytest, depth=nc)
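
# NOTE: the source listing omits the model definition and training step.
# The block below is a minimal reconstruction (the exact architecture is an
# assumption) consistent with the imports above, so that model.predict() runs.
model = Sequential()
model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(nc, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(Xtrain, ytrainEnc, validation_data=(Xtest, ytestEnc), epochs=3)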

ypred = model.predict(Xtest)
ypred = np.argmax(ypred, axis=1)
score = accuracy_score(ytest, ypred)
print('accuracy score is', 100 * score, '%')

OUTPUT:


PROGRAM -4

Convolutional_NN_MNIST_Dataset.py

import tensorflow as tf
from keras.datasets import mnist
from keras.layers import Conv2D, Flatten, Dense
import numpy as np
import numpy.random as nr
import matplotlib.pyplot as plt
from keras.models import Sequential
from sklearn.metrics import accuracy_score
nc = 10

(Xtrain, ytrain),(Xtest,ytest) = mnist.load_data()


plt.figure(1)
imgplot1 = plt.imshow(Xtrain[nr.randint(60000)])
plt.show()

plt.figure(2)
imgplot2 = plt.imshow(Xtrain[nr.randint(60000)])
plt.show()

Xtrain = Xtrain.reshape(60000, 28, 28, 1)
Xtest = Xtest.reshape(10000, 28, 28, 1)

ytrainEnc = tf.one_hot(ytrain, depth=nc)
ytestEnc = tf.one_hot(ytest, depth=nc)
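
# NOTE: the source listing omits the model definition. A minimal assumed
# architecture matching the imports above (Conv2D, Flatten, Dense) is
# reconstructed here so that model.fit() below runs.
model = Sequential()
model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(nc, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])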

model.fit(Xtrain, ytrainEnc, validation_data=(Xtest, ytestEnc), epochs=3)

ypred = model.predict(Xtest)
ypred = np.argmax(ypred, axis=1)
score = accuracy_score(ytest, ypred)
print('accuracy score is', 100 * score, '%')

OUTPUT:


PROGRAM -5

DeepLearning_CIFAR_10.py

import tensorflow as tf
from keras.datasets import cifar10
from keras.layers import Conv2D, Flatten, Dense,MaxPooling2D
import numpy as np
import numpy.random as nr
import matplotlib.pyplot as plt
from keras.models import Sequential
from sklearn.metrics import accuracy_score
from keras.optimizers import SGD
nc = 10

(Xtrain, ytrain),(Xtest,ytest) = cifar10.load_data()

plt.figure(1)
imgplot1 = plt.imshow(Xtrain[nr.randint(50000)])
plt.show()

plt.figure(2)
imgplot2 = plt.imshow(Xtrain[nr.randint(50000)])
plt.show()

Xtrain = Xtrain.astype('float32')
Xtrain = Xtrain[0:20000,:]/255.0
Xtest = Xtest.astype('float32')
Xtest = Xtest/255.0

ytrain = ytrain[:,0]
ytrainEnc = tf.one_hot(ytrain[0:20000],depth=nc)
ytest = ytest[:,0]
ytestEnc = tf.one_hot(ytest,depth=nc)

opt = SGD(learning_rate=0.001, momentum=0.9)  # 'lr' is a legacy alias for 'learning_rate'
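
# NOTE: the source listing omits the model definition. A minimal assumed CNN
# matching the imports above (Conv2D, MaxPooling2D, Flatten, Dense) is
# reconstructed here so that model.compile() below runs.
model = Sequential()
model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=(32, 32, 3)))
model.add(MaxPooling2D(pool_size=2))
model.add(Flatten())
model.add(Dense(nc, activation='softmax'))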


model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(Xtrain, ytrainEnc, epochs=10, batch_size=550, validation_data=(Xtest, ytestEnc))

ypred = model.predict(Xtest)
ypred = np.argmax(ypred, axis=1)

score = accuracy_score(ytest, ypred)
print('Accuracy score is', 100 * score, '%')

OUTPUT:
