nndlrepo2

1. Simple Vector Addition   2. Regression Model   3. Implement Perceptron   4. Implement Feedforward Network


#exp1 Simple vector addition and basic tensor operations in TensorFlow
# importing packages
import tensorflow as tf

# creating a scalar
scalar = tf.constant(7)
scalar
scalar.ndim
# create a vector
vector = tf.constant([10, 10])
# checking the dimensions of the vector
vector.ndim
# creating a matrix
matrix = tf.constant([[1, 2], [3, 4]])
print(matrix)
print('the number of dimensions of a matrix is : ' + str(matrix.ndim))
# creating two tensors
matrix = tf.constant([[1, 2], [3, 4]])
matrix1 = tf.constant([[2, 4], [6, 8]])
# addition of two matrices
print(matrix + matrix1)
# subtraction of two matrices
print(matrix1 - matrix)
# element-wise multiplication of two matrices
print(matrix1 * matrix)
# element-wise division of two matrices
print(matrix1 / matrix)
# creating a matrix
matrix = tf.constant([[1, 2], [3, 4]])
print(matrix)
# transpose of the matrix
print(tf.transpose(matrix))
# dot product of matrices
print('dot product of matrices is : ' + str(tf.tensordot(matrix, matrix, axes=1)))

#exp2 Regression model in Keras
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt

# Generate synthetic data: y = 3x + 2 plus Gaussian noise
np.random.seed(42)
X = np.random.rand(100, 1)
y = 3 * X + 2 + 0.1 * np.random.randn(100, 1)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Build the regression model
model = Sequential()
model.add(Dense(units=10, input_dim=1, activation='relu'))  # Hidden layer with ReLU activation
model.add(Dense(units=1))  # Output layer
# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')
# Train the model
model.fit(X_train, y_train, epochs=10, batch_size=8, verbose=1, validation_split=0.2)
# Evaluate the model on the test set
y_pred = model.predict(X_test)
# Calculate and print the mean squared error
mse = mean_squared_error(y_test, y_pred)
print("Mean Squared Error:", mse)
# Plot the results
plt.scatter(X_test, y_test, label='True data')
plt.scatter(X_test, y_pred, label='Predicted data')
plt.xlabel('X')
plt.ylabel('y')
plt.legend()
plt.show()
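As a quick sanity check on the exp2 regression model, a minimal optional sketch (assuming the model trained above is still in scope) predicts a few hand-picked x values and compares them with the noise-free generating line y = 3x + 2:

# Hypothetical follow-up check, not part of the original exp2 listing.
x_new = np.array([[0.0], [0.5], [1.0]])
y_hat = model.predict(x_new)        # trained exp2 model
y_true = 3 * x_new + 2              # noise-free line used to generate the data
for xi, yh, yt in zip(x_new.ravel(), y_hat.ravel(), y_true.ravel()):
    print(f"x={xi:.2f}  predicted={yh:.3f}  target={yt:.3f}")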
#exp3 Implement a perceptron in TensorFlow/Keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
import numpy as np

# Generate some sample data for a logical OR operation
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # Input features
y = np.array([0, 1, 1, 1])  # Output labels (OR gate)
# Define a simple perceptron model
model = keras.Sequential([
    Dense(units=1, input_dim=2, activation='sigmoid')  # 2 input features, 1 output unit, sigmoid activation
])
# Compile the model
model.compile(optimizer=SGD(learning_rate=0.1), loss='mean_squared_error', metrics=['accuracy'])
# Train the model
model.fit(X, y, epochs=1000, verbose=0)  # You can adjust the number of epochs
# Evaluate the model
loss, accuracy = model.evaluate(X, y)
print("Loss:", loss)
print("Accuracy:", accuracy)
# Make predictions
predictions = model.predict(X)
print("Predictions:")
print(predictions)

#exp4 Implement a feedforward network in TensorFlow/Keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense
import numpy as np

# Generate some sample data
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # Input features
y = np.array([0, 1, 1, 0])  # Output labels (XOR gate)
# Define a feedforward neural network model
model = keras.Sequential([
    Dense(units=4, input_dim=2, activation='relu'),  # 2 input features, 4 hidden units with ReLU activation
    Dense(units=1, activation='sigmoid')  # 1 output unit with sigmoid activation
])
# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(X, y, epochs=1000, verbose=0)  # You can adjust the number of epochs
# Evaluate the model
loss, accuracy = model.evaluate(X, y)
print("Loss:", loss)
print("Accuracy:", accuracy)
# Make predictions
predictions = model.predict(X)
print("Predictions:")
print(predictions)
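The perceptron (exp3) and the XOR network (exp4) both print raw sigmoid probabilities. A small hypothetical post-processing step, assuming the exp4 variables above are still in scope, thresholds those probabilities at 0.5 to get hard class labels:

# Hypothetical post-processing step: turn sigmoid outputs into 0/1 labels.
hard_labels = (predictions > 0.5).astype(int).ravel()
print("Inputs:        ", X.tolist())
print("Target labels: ", y.tolist())
print("Predicted:     ", hard_labels.tolist())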

5. Image Classifier

#exp5 CNN image classifier on CIFAR-10
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
import numpy as np
from PIL import Image

# Load and preprocess the CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0  # Normalize pixel values to the range [0, 1]
y_train = to_categorical(y_train, 10)  # One-hot encode the labels
y_test = to_categorical(y_test, 10)
# Define the CNN model
model = keras.Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    Flatten(),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax')
])
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))
# Load and preprocess the custom image
custom_image_path = 'test.jpg'
custom_image = Image.open(custom_image_path)
custom_image = custom_image.resize((32, 32))  # Resize the image to match the input shape of the model
custom_image = np.array(custom_image) / 255.0  # Normalize pixel values to [0, 1]
custom_image = np.expand_dims(custom_image, axis=0)  # Add batch dimension
# Use the trained model to predict the class of the custom image
predicted_probs = model.predict(custom_image)
predicted_class = np.argmax(predicted_probs)
# Define class labels for the CIFAR-10 dataset
class_labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# Print the predicted class label
print("Predicted Class Label:", class_labels[predicted_class])
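The exp5 listing uses the test split only as validation data during training and never reports a standalone test score. A minimal optional check, assuming the exp5 model, x_test, and y_test defined above, could be:

# Hypothetical evaluation step for exp5: report loss/accuracy on the held-out test set.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print("CIFAR-10 test loss:", test_loss)
print("CIFAR-10 test accuracy:", test_acc)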
6. Tuning Hyperparameters

#exp6 Improve the deep learning model by fine-tuning hyperparameters
from tensorflow.keras.datasets import mnist
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import L2
import matplotlib.pyplot as plt

(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train.shape, y_train.shape, X_test.shape, y_test.shape
# Visualize the first input
plt.imshow(X_train[0])
# Base Model (Model 1) with the Adam optimizer
opt_1 = Adam(learning_rate=0.001)
model_1 = Sequential([layers.Input((28, 28)), layers.Lambda(lambda x: x / 255), layers.Flatten(),
                      layers.Dense(10, activation='softmax')])
model_1.compile(optimizer=opt_1, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model_1.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)
# Adding parameters (Model 2)
opt_2 = Adam(learning_rate=0.001)
model_2 = Sequential([layers.Input((28, 28)), layers.Lambda(lambda x: x / 255), layers.Flatten(),
                      layers.Dense(32, activation='relu'), layers.Dense(10, activation='softmax')])
model_2.compile(optimizer=opt_2, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model_2.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)
# Lowering the learning rate (Model 3)
opt_3 = Adam(learning_rate=0.00001)
model_3 = Sequential([layers.Input((28, 28)), layers.Lambda(lambda x: x / 255), layers.Flatten(),
                      layers.Dense(32, activation='relu'), layers.Dense(10, activation='softmax')])
model_3.compile(optimizer=opt_3, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model_3.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)
# Greatly increasing the number of parameters (Model 4)
opt_4 = Adam(learning_rate=0.001)
model_4 = Sequential([layers.Input((28, 28)), layers.Lambda(lambda x: x / 255), layers.Flatten(),
                      layers.Dense(128, activation='relu'), layers.Dense(128, activation='relu'),
                      layers.Dense(10, activation='softmax')])
model_4.compile(optimizer=opt_4, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model_4.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)
# Adding L2 regularization and dropout to reduce overfitting (Model 5)
opt_5 = Adam(learning_rate=0.001)
model_5 = Sequential([layers.Input((28, 28)), layers.Lambda(lambda x: x / 255), layers.Flatten(),
                      layers.Dense(128, activation='relu', kernel_regularizer=L2(0.001)), layers.Dropout(0.05),
                      layers.Dense(128, activation='relu', kernel_regularizer=L2(0.001)), layers.Dropout(0.05),
                      layers.Dense(128, activation='relu', kernel_regularizer=L2(0.001)), layers.Dropout(0.05),
                      layers.Dense(10, activation='softmax')])
model_5.compile(optimizer=opt_5, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model_5.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)
# Increase the number of epochs by training Model 5 for another 10 epochs
model_5.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)
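To see which hyperparameter change actually helped, a small hypothetical comparison (not in the original listing, assuming the five exp6 models above have been trained) evaluates each one on the MNIST test split:

# Hypothetical comparison step: evaluate each tuned model on the test set.
for name, m in [('model_1', model_1), ('model_2', model_2), ('model_3', model_3),
                ('model_4', model_4), ('model_5', model_5)]:
    loss, acc = m.evaluate(X_test, y_test, verbose=0)
    print(f"{name}: test accuracy = {acc:.4f}")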
7. Transfer Learning on Images

#exp7 Transfer learning with a TF-Hub feature extractor on the beans dataset
import matplotlib.pylab as plt
import tensorflow as tf
import tensorflow_hub as hub
import os
import numpy as np
import tensorflow_datasets as tfds
import warnings
warnings.filterwarnings('ignore')

datasets, info = tfds.load(name='beans', with_info=True, as_supervised=True,
                           split=['train', 'test', 'validation'])
info
train, info_train = tfds.load(name='beans', with_info=True, split='test')
tfds.show_examples(train, info_train)

def scale(image, label):
    image = tf.cast(image, tf.float32)
    image /= 255.0
    return tf.image.resize(image, [224, 224]), tf.one_hot(label, 3)

def get_dataset(batch_size=32):
    train_dataset_scaled = datasets[0].map(scale).shuffle(1000).batch(batch_size)
    test_dataset_scaled = datasets[1].map(scale).batch(batch_size)
    val_dataset_scaled = datasets[2].map(scale).batch(batch_size)
    return train_dataset_scaled, test_dataset_scaled, val_dataset_scaled

train_dataset, test_dataset, val_dataset = get_dataset()
train_dataset.cache()
val_dataset.cache()
len(list(datasets[0]))

feature_extractor = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
feature_extractor_layer = hub.KerasLayer(feature_extractor, input_shape=(224, 224, 3))
feature_extractor_layer.trainable = False
model = tf.keras.Sequential([
    feature_extractor_layer,
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(3, activation='softmax')
])
model.summary()
model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
    metrics=['acc'])
history = model.fit(train_dataset, epochs=6, validation_data=val_dataset)
result = model.evaluate(test_dataset)

for test_sample in datasets[1].take(10):
    image, label = test_sample[0], test_sample[1]
    image_scaled, label_arr = scale(test_sample[0], test_sample[1])
    image_scaled = np.expand_dims(image_scaled, axis=0)
    img = tf.keras.preprocessing.image.img_to_array(image)
    pred = model.predict(image_scaled)
    print(pred)
    plt.figure()
    plt.imshow(image)
    plt.show()
    print("Actual Label: %s" % info.features["label"].names[label.numpy()])
    print("Predicted Label: %s" % info.features["label"].names[np.argmax(pred)])

for f0, f1 in datasets[1].map(scale).batch(200):
    y = np.argmax(f1, axis=1)
    y_pred = np.argmax(model.predict(f0), axis=1)
    print(tf.math.confusion_matrix(labels=y, predictions=y_pred, num_classes=3))
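The exp7 run stores the fit() result in history. An optional sketch, assuming that variable and the matplotlib import from the listing above, plots the accuracy curves; the 'acc' and 'val_acc' keys follow from the metrics=['acc'] setting:

# Hypothetical visualization step: plot accuracy per epoch from the exp7 history object.
plt.figure()
plt.plot(history.history['acc'], label='train accuracy')
plt.plot(history.history['val_acc'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()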
8. Use a Pre-trained Model in Keras

#exp8 MobileNetV2 pre-trained on ImageNet, applied to CIFAR-10
import numpy as np
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

# Load CIFAR-10 dataset
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
# Normalize pixel values to the range [0, 1]
train_images = train_images.astype('float32') / 255.0
test_images = test_images.astype('float32') / 255.0
# Convert labels to one-hot encoding
train_labels = to_categorical(train_labels, num_classes=10)
test_labels = to_categorical(test_labels, num_classes=10)
# Load the MobileNetV2 pre-trained model without the top classification layer
base_model = MobileNetV2(weights='imagenet', include_top=False, input_shape=(32, 32, 3))
# Add your own classification layers on top of the pre-trained model
model = Sequential()
model.add(base_model)
model.add(GlobalAveragePooling2D())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))  # 10 classes in CIFAR-10
# Freeze the pre-trained layers to avoid retraining
for layer in base_model.layers:
    layer.trainable = False
# Compile the model
model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model with transfer learning
model.fit(
    train_images,
    train_labels,
    batch_size=32,
    epochs=10,
    validation_data=(test_images, test_labels))
# Evaluate the model
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(f'Test accuracy: {test_acc}')
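A common follow-up to exp8 is to fine-tune the backbone once the new head has converged. The sketch below is a hypothetical extension rather than part of the original experiment; it assumes the exp8 base_model, model, and data arrays are still in scope and retrains briefly at a much lower learning rate:

# Hypothetical fine-tuning step, run after the frozen-backbone training above.
base_model.trainable = True                         # unfreeze the MobileNetV2 layers
model.compile(optimizer=Adam(learning_rate=1e-5),   # low LR to avoid destroying pretrained weights
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels,
          batch_size=32,
          epochs=3,
          validation_data=(test_images, test_labels))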

9. Sentiment Analysis

#exp9 Sentiment analysis on the IMDB reviews dataset with an LSTM
from keras.datasets import imdb

vocabulary_size = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=vocabulary_size)
print('Loaded dataset with {} training samples, {} test samples'.format(len(X_train), len(X_test)))
print('---review---')
print(X_train[6])
print('---label---')
print(y_train[6])
word2id = imdb.get_word_index()
id2word = {i: word for word, i in word2id.items()}
print('---review with words---')
print([id2word.get(i, ' ') for i in X_train[6]])
print('---label---')
print(y_train[6])
print('Maximum review length: {}'.format(len(max((X_train + X_test), key=len))))
print('Minimum review length: {}'.format(len(min((X_train + X_test), key=len))))
from keras.preprocessing import sequence
max_words = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_words)
X_test = sequence.pad_sequences(X_test, maxlen=max_words)
from keras import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout
embedding_size = 32
model = Sequential()
model.add(Embedding(vocabulary_size, embedding_size, input_length=max_words))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
batch_size = 64
num_epochs = 3
X_valid, y_valid = X_train[:batch_size], y_train[:batch_size]
X_train2, y_train2 = X_train[batch_size:], y_train[batch_size:]
model.fit(X_train2, y_train2, validation_data=(X_valid, y_valid), batch_size=batch_size, epochs=num_epochs)
scores = model.evaluate(X_test, y_test, verbose=0)
print('Test accuracy:', scores[1])
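After training, a quick optional spot-check (assuming the exp9 model and the padded X_test/y_test above) predicts sentiment for a few test reviews and prints the rounded labels next to the true ones:

# Hypothetical spot-check for exp9: compare predicted vs. true sentiment on a few test reviews.
probs = model.predict(X_test[:5])
for p, true_label in zip(probs.ravel(), y_test[:5]):
    predicted = int(p > 0.5)   # 1 = positive, 0 = negative
    print(f"predicted={predicted} (p={p:.3f})  true={true_label}")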
10. LSTM Autoencoder

#exp10 LSTM autoencoder on time-series data
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Input, LSTM, RepeatVector
from tensorflow.keras.models import Model
from sklearn.preprocessing import StandardScaler

# Load the data from 'JNJ.csv'
df = pd.read_csv('JNJ.csv')
data = df[['Close']].values  # Assuming the 'Close' column contains the data of interest
scaler = StandardScaler()
scaled_data = scaler.fit_transform(data)
# Define input sequence length and dimensionality
seq_length = 10
input_dim = scaled_data.shape[1]
# Define LSTM units in the encoder
latent_dim = 3

# Function to create sequences from data
def create_sequences(data, seq_length):
    sequences = []
    for i in range(len(data) - seq_length + 1):
        sequence = data[i : i + seq_length]
        sequences.append(sequence)
    return np.array(sequences)

# Create sequences for training the autoencoder
X_train = create_sequences(scaled_data, seq_length)
# Define input layer
inputs = Input(shape=(seq_length, input_dim))
# Encoder
encoder = LSTM(latent_dim, return_sequences=False)(inputs)
encoded = RepeatVector(seq_length)(encoder)
# Decoder
decoder = LSTM(input_dim, return_sequences=True)(encoded)
# Define the autoencoder model
autoencoder = Model(inputs, decoder)
# Compile the model
autoencoder.compile(optimizer='adam', loss='mse')
# Train the autoencoder
autoencoder.fit(X_train, X_train, epochs=50, batch_size=32, validation_split=0.2)
# Generate predictions using the trained autoencoder
decoded_data = autoencoder.predict(X_train)
# Perform inverse scaling to get the original data back
decoded_data_original = scaler.inverse_transform(decoded_data.reshape(-1, input_dim))
# Show example output (first sequence) after training
print("Original Data:")
print(data[:seq_length])
print("\nReconstructed Data:")
print(decoded_data_original[:seq_length])
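An optional diagnostic for exp10, assuming the X_train and decoded_data arrays above, computes the mean squared reconstruction error per sequence; the sequences the autoencoder reconstructs worst show up with the largest errors:

# Hypothetical diagnostic: per-sequence reconstruction error in scaled units.
reconstruction_errors = np.mean((X_train - decoded_data) ** 2, axis=(1, 2))
print("Mean reconstruction MSE:", reconstruction_errors.mean())
print("Worst 5 sequences (highest MSE):", np.argsort(reconstruction_errors)[-5:])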
11. Image Generation with a GAN

#exp11 Simple GAN on MNIST
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Reshape, Flatten
from tensorflow.keras.optimizers import Adam

# Load MNIST dataset (handwritten digits)
(train_images, _), (_, _) = mnist.load_data()
# Normalize images to [-1, 1] and add a channel dimension
train_images = (train_images.astype('float32') - 127.5) / 127.5
train_images = np.expand_dims(train_images, axis=-1)
# Define generator model (100-dim noise vector -> 28x28 image)
generator = Sequential([
    Dense(256, input_shape=(100,), activation='relu'),
    Dense(512, activation='relu'),
    Dense(784, activation='tanh'),
    Reshape((28, 28, 1))])
# Define discriminator model
discriminator = Sequential([
    Flatten(input_shape=(28, 28, 1)),
    Dense(512, activation='relu'),
    Dense(256, activation='relu'),
    Dense(1, activation='sigmoid')])
discriminator.compile(optimizer=Adam(learning_rate=0.0002, beta_1=0.5),
                      loss='binary_crossentropy', metrics=['accuracy'])
# Define GAN model (generator -> discriminator)
gan = Sequential([generator, discriminator])
# Compile the GAN with the discriminator frozen, so only the generator is updated through the GAN
discriminator.trainable = False
gan.compile(optimizer=Adam(learning_rate=0.0002, beta_1=0.5), loss='binary_crossentropy')
# Training parameters
epochs = 100
batch_size = 128
# Training loop
for epoch in range(epochs):
    noise = np.random.normal(0, 1, (batch_size, 100))
    # Generate fake images from noise using the generator
    fake_images = generator.predict(noise)
    # Sample real images from the training data
    idx = np.random.randint(0, train_images.shape[0], batch_size)
    real_images = train_images[idx]
    # Combine real and fake images into a single batch
    X = np.concatenate([real_images, fake_images])
    # Labels for the discriminator (1 for real images, 0 for fake images)
    y_discriminator = np.zeros(2 * batch_size)
    y_discriminator[:batch_size] = 1
    # Train the discriminator on the batch
    d_loss = discriminator.train_on_batch(X, y_discriminator)
    # Generate new noise for GAN training
    noise = np.random.normal(0, 1, (batch_size, 100))
    # Labels for the GAN (1 to trick the discriminator)
    y_gan = np.ones(batch_size)
    # Train the GAN (only updates the generator weights)
    g_loss = gan.train_on_batch(noise, y_gan)
    if epoch % 10 == 0:
        print(f'Epoch: {epoch}, Discriminator Loss: {d_loss[0]}, Generator Loss: {g_loss}')
# Generate and display some fake images
noise = np.random.normal(0, 1, (10, 100))
generated_images = generator.predict(noise)
plt.figure(figsize=(10, 10))
for i in range(10):
    plt.subplot(1, 10, i + 1)
    plt.imshow(generated_images[i].reshape(28, 28), cmap='gray')
    plt.axis('off')
plt.tight_layout()
plt.show()
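To reuse the generator after the exp11 loop finishes, a hypothetical final step (the file name is an assumption, not part of the original listing) saves it and reloads it to draw fresh samples:

# Hypothetical persistence step: save the trained generator and sample from the reloaded copy.
from tensorflow.keras.models import load_model

generator.save('mnist_generator.h5')                  # assumed output path
restored_generator = load_model('mnist_generator.h5')
new_noise = np.random.normal(0, 1, (5, 100))
new_digits = restored_generator.predict(new_noise)    # shape (5, 28, 28, 1)
print("Sampled digit batch shape:", new_digits.shape)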
