Nndlrepo
# importing packages
import tensorflow as tf
# creating a scalar
scalar = tf.constant(7)
scalar
scalar.ndim
# create a vector
vector = tf.constant([10, 10])
# checking the dimensions of vector
vector.ndim
# creating a matrix
matrix = tf.constant([[1, 2], [3, 4]])
print(matrix)
print('the number of dimensions of a matrix is : ' + str(matrix.ndim))
# creating two tensors
matrix = tf.constant([[1, 2], [3, 4]])
matrix1 = tf.constant([[2, 4], [6, 8]])
# addition of two matrices
print(matrix + matrix1)
# subtraction of two matrices
print(matrix1 - matrix)
# multiplication of two matrices
print(matrix1 * matrix)
# division of two matrices
print(matrix1 / matrix)
# creating a matrix
matrix = tf.constant([[1, 2], [3, 4]])
print(matrix)
# transpose of the matrix
print(tf.transpose(matrix))
# dot product of matrices
print('dot product of matrices is : ' + str(tf.tensordot(matrix, matrix, axes=1)))
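For two 2-D tensors, tf.matmul produces the same result as tf.tensordot with axes=1; a minimal sketch, assuming the `matrix` constant defined above, to confirm the equivalence:

# Assumes `matrix` from the block above; for 2-D tensors, standard matrix
# multiplication with tf.matmul matches tf.tensordot(..., axes=1).
print(tf.matmul(matrix, matrix))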
# Regression with a feedforward network
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
np.random.seed(42)
X = np.random.rand(100, 1)
y = 3 * X + 2 + 0.1 * np.random.randn(100, 1)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Build the regression model
model = Sequential()
model.add(Dense(units=10, input_dim=1, activation='relu'))  # Hidden layer with ReLU activation
model.add(Dense(units=1))  # Output layer
# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')
# Train the model
model.fit(X_train, y_train, epochs=10, batch_size=8, verbose=1, validation_split=0.2)
# Evaluate the model on the test set
y_pred = model.predict(X_test)
# Calculate and print the mean squared error
mse = mean_squared_error(y_test, y_pred)
print("Mean Squared Error:", mse)
# Plot the results
plt.scatter(X_test, y_test, label='True data')
plt.scatter(X_test, y_pred, label='Predicted data')
plt.xlabel('X')
plt.ylabel('y')
plt.legend()
plt.show()
#exp4 Implement a feed forward network in tensorflow/keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
import numpy as np
# Generate some sample data for a logical OR operation
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # Input features
y = np.array([0, 1, 1, 1])  # Output labels (OR gate)
# Define a simple perceptron model
model = keras.Sequential([
    Dense(units=1, input_dim=2, activation='sigmoid')  # 2 input features, 1 output unit, sigmoid activation
])
# Compile the model
model.compile(optimizer=SGD(learning_rate=0.1), loss='mean_squared_error', metrics=['accuracy'])
# Train the model
model.fit(X, y, epochs=1000, verbose=0)  # You can adjust the number of epochs
# Evaluate the model
loss, accuracy = model.evaluate(X, y)
print("Loss:", loss)
print("Accuracy:", accuracy)
# Make predictions
predictions = model.predict(X)
print("Predictions:")
print(predictions)
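The predictions above are raw sigmoid probabilities; a minimal sketch, assuming the `predictions` array from the block above (it applies equally to the XOR model that follows), that thresholds them at 0.5 to obtain hard 0/1 labels:

# Convert sigmoid probabilities into hard class labels (assumes `predictions` from the model above)
predicted_labels = (predictions > 0.5).astype(int)
print("Predicted labels:", predicted_labels.ravel())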
# Feedforward network for the XOR gate
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense
import numpy as np
# Generate some sample data
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # Input features
y = np.array([0, 1, 1, 0])  # Output labels (XOR gate)
# Define a feedforward neural network model
model = keras.Sequential([
    Dense(units=4, input_dim=2, activation='relu'),  # 2 input features, 4 hidden units with ReLU activation
    Dense(units=1, activation='sigmoid')  # 1 output unit with sigmoid activation
])
# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(X, y, epochs=1000, verbose=0)  # You can adjust the number of epochs
# Evaluate the model
loss, accuracy = model.evaluate(X, y)
print("Loss:", loss)
print("Accuracy:", accuracy)
# Make predictions
predictions = model.predict(X)
print("Predictions:")
print(predictions)
#exp5 CNN image classification on CIFAR-10 with prediction on a custom image
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
import numpy as np
from PIL import Image
# Load and preprocess the CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0  # Normalize pixel values to the range [0, 1]
y_train = to_categorical(y_train, 10)  # One-hot encode the labels
y_test = to_categorical(y_test, 10)
# Define the CNN model
model = keras.Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    Flatten(),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax')
])
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))
# Load and preprocess the custom image
custom_image_path = 'test.jpg'
custom_image = Image.open(custom_image_path)
custom_image = custom_image.resize((32, 32))  # Resize the image to match the input shape of the model
custom_image = np.array(custom_image) / 255.0  # Normalize pixel values to [0, 1]
custom_image = np.expand_dims(custom_image, axis=0)  # Add batch dimension
# Use the trained model to predict the class of the custom image
predicted_probs = model.predict(custom_image)
predicted_class = np.argmax(predicted_probs)
# Define class labels for CIFAR-10 dataset
class_labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# Print the predicted class label
print("Predicted Class Label:", class_labels[predicted_class])
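The CIFAR-10 test split is already used as validation data during training above; for an explicit final score, a minimal evaluation sketch assuming the `model`, `x_test`, and `y_test` defined in that block:

# Evaluate the trained CNN on the CIFAR-10 test set (assumes model, x_test, y_test from above)
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", test_loss)
print("Test accuracy:", test_acc)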
6. Tuning hyperparameters
#exp6 improve the deep learning model by fine tuning hyper parameters
from tensorflow.keras.datasets import mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train.shape, y_train.shape, X_test.shape, y_test.shape
# Visualize the first input
import matplotlib.pyplot as plt
plt.imshow(X_train[0])
# Create Adam Optimizer
from tensorflow.keras.optimizers import Adam
opt_1 = Adam(learning_rate=0.001)
# Base Model (Model 1)
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
model_1 = Sequential([layers.Input((28, 28)), layers.Lambda(lambda x: x / 255), layers.Flatten(),
                      layers.Dense(10, activation='softmax')])
model_1.compile(optimizer=opt_1, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model_1.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)
# Adding number of parameters (Model 2)
opt_2 = Adam(learning_rate=0.001)
model_2 = Sequential([layers.Input((28, 28)), layers.Lambda(lambda x: x / 255), layers.Flatten(),
                      layers.Dense(32, activation='relu'), layers.Dense(10, activation='softmax')])
model_2.compile(optimizer=opt_2, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model_2.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)
# Decreasing the learning rate (Model 3)
opt_3 = Adam(learning_rate=0.00001)
model_3 = Sequential([layers.Input((28, 28)), layers.Lambda(lambda x: x / 255), layers.Flatten(),
                      layers.Dense(32, activation='relu'), layers.Dense(10, activation='softmax')])
model_3.compile(optimizer=opt_3, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model_3.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)
# Greatly add number of parameters (Model 4)
opt_4 = Adam(learning_rate=0.001)
model_4 = Sequential([layers.Input((28, 28)), layers.Lambda(lambda x: x / 255), layers.Flatten(),
                      layers.Dense(128, activation='relu'), layers.Dense(128, activation='relu'),
                      layers.Dense(10, activation='softmax')])
model_4.compile(optimizer=opt_4, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model_4.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)
# Add Regularization / Dropout to reduce overfitting (Model 5)
from tensorflow.keras.regularizers import L2
opt_5 = Adam(learning_rate=0.001)
model_5 = Sequential([layers.Input((28, 28)), layers.Lambda(lambda x: x / 255), layers.Flatten(),
                      layers.Dense(128, activation='relu', kernel_regularizer=L2(0.001)),
                      layers.Dropout(0.05),
                      layers.Dense(128, activation='relu', kernel_regularizer=L2(0.001)),
                      layers.Dropout(0.05),
                      layers.Dense(128, activation='relu', kernel_regularizer=L2(0.001)),
                      layers.Dropout(0.05),
                      layers.Dense(10, activation='softmax')])
model_5.compile(optimizer=opt_5, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model_5.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)
# Increase number of epochs
model_5.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)
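Because each configuration above is kept in its own model_k variable, a short sketch (not part of the original listing) can score them side by side on the same held-out split to see which hyperparameter change actually helped:

# Compare the five configurations on the MNIST test set (assumes model_1 .. model_5 trained above)
for name, m in [('model_1', model_1), ('model_2', model_2), ('model_3', model_3),
                ('model_4', model_4), ('model_5', model_5)]:
    loss, acc = m.evaluate(X_test, y_test, verbose=0)
    print(name, 'test accuracy:', acc)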
7. Transfer learning (image classification)
#exp7
import matplotlib.pylab as plt
import tensorflow as tf
import tensorflow_hub as hub
import os
import numpy as np
import tensorflow_datasets as tfds
import warnings
warnings.filterwarnings('ignore')
datasets, info = tfds.load(name='beans', with_info=True, as_supervised=True,
                           split=['train', 'test', 'validation'])
info
train, info_train = tfds.load(name='beans', with_info=True, split='test')
tfds.show_examples(train, info_train)
def scale(image, label):
    image = tf.cast(image, tf.float32)
    image /= 255.0
    return tf.image.resize(image, [224, 224]), tf.one_hot(label, 3)
def get_dataset(batch_size=32):
    train_dataset_scaled = datasets[0].map(scale).shuffle(1000).batch(batch_size)
    test_dataset_scaled = datasets[1].map(scale).batch(batch_size)
    val_dataset_scaled = datasets[2].map(scale).batch(batch_size)
    return train_dataset_scaled, test_dataset_scaled, val_dataset_scaled
train_dataset, test_dataset, val_dataset = get_dataset()
train_dataset.cache()
val_dataset.cache()
len(list(datasets[0]))
feature_extractor = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
feature_extractor_layer = hub.KerasLayer(feature_extractor, input_shape=(224, 224, 3))
feature_extractor_layer.trainable = False
model = tf.keras.Sequential([
    feature_extractor_layer,
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(3, activation='softmax')
])
model.summary()
model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    # the output layer is softmax, so from_logits is left at its default (False)
    loss=tf.keras.losses.CategoricalCrossentropy(),
    metrics=['acc'])
history = model.fit(train_dataset, epochs=6, validation_data=val_dataset)
result = model.evaluate(test_dataset)
for test_sample in datasets[1].take(10):
    image, label = test_sample[0], test_sample[1]
    image_scaled, label_arr = scale(test_sample[0], test_sample[1])
    image_scaled = np.expand_dims(image_scaled, axis=0)
    img = tf.keras.preprocessing.image.img_to_array(image)
    pred = model.predict(image_scaled)
    print(pred)
    plt.figure()
    plt.imshow(image)
    plt.show()
    print("Actual Label: %s" % info.features["label"].names[label.numpy()])
    print("Predicted Label: %s" % info.features["label"].names[np.argmax(pred)])
for f0, f1 in datasets[1].map(scale).batch(200):
    y = np.argmax(f1, axis=1)
    y_pred = np.argmax(model.predict(f0), axis=1)
    print(tf.math.confusion_matrix(labels=y, predictions=y_pred, num_classes=3))
8. Use a pre-trained model in Keras
#8modified
import numpy as np
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
# Load CIFAR-10 dataset
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
# Normalize pixel values to the range [0, 1]
train_images = train_images.astype('float32') / 255.0
test_images = test_images.astype('float32') / 255.0
# Convert labels to one-hot encoding
train_labels = to_categorical(train_labels, num_classes=10)
test_labels = to_categorical(test_labels, num_classes=10)
# Load the MobileNetV2 pre-trained model without the top classification layer
base_model = MobileNetV2(weights='imagenet', include_top=False, input_shape=(32, 32, 3))
# Add your own classification layers on top of the pre-trained model
model = Sequential()
model.add(base_model)
model.add(GlobalAveragePooling2D())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))  # 10 classes in CIFAR-10
# Freeze the pre-trained layers to avoid retraining
for layer in base_model.layers:
    layer.trainable = False
# Compile the model
model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model with transfer learning
model.fit(
    train_images,
    train_labels,
    batch_size=32,
    epochs=10,
    validation_data=(test_images, test_labels))
# Evaluate the model
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(f'Test accuracy: {test_acc}')
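As a possible next step after the frozen-backbone training above, the top of the MobileNetV2 backbone can be unfrozen and the model recompiled with a much smaller learning rate; a minimal fine-tuning sketch, where the layer count (20), learning rate (1e-5), and epoch count (3) are illustrative choices, not values from the original listing:

# Unfreeze only the last few backbone layers for fine-tuning (20, 1e-5 and 3 epochs are illustrative)
for layer in base_model.layers[-20:]:
    layer.trainable = True
model.compile(optimizer=Adam(learning_rate=1e-5), loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, batch_size=32, epochs=3,
          validation_data=(test_images, test_labels))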
9. Sentiment Analysis
from keras.datasets import imdb
vocabulary_size = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=vocabulary_size)
print('Loaded dataset with {} training samples, {} test samples'.format(len(X_train), len(X_test)))
print('---review---')
print(X_train[6])
print('---label---')
print(y_train[6])
word2id = imdb.get_word_index()
id2word = {i: word for word, i in word2id.items()}
print('---review with words---')
print([id2word.get(i, ' ') for i in X_train[6]])
print('---label---')
print(y_train[6])
print('Maximum review length: {}'.format(len(max((X_train + X_test), key=len))))
print('Minimum review length: {}'.format(len(min((X_train + X_test), key=len))))
from keras.preprocessing import sequence
max_words = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_words)
X_test = sequence.pad_sequences(X_test, maxlen=max_words)
from keras import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout
embedding_size = 32
model = Sequential()
model.add(Embedding(vocabulary_size, embedding_size, input_length=max_words))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
batch_size = 64
num_epochs = 3
X_valid, y_valid = X_train[:batch_size], y_train[:batch_size]
X_train2, y_train2 = X_train[batch_size:], y_train[batch_size:]
model.fit(X_train2, y_train2, validation_data=(X_valid, y_valid),
          batch_size=batch_size, epochs=num_epochs)
scores = model.evaluate(X_test, y_test, verbose=0)
print('Test accuracy:', scores[1])
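A short hedged sketch of scoring one new review with the trained model: the raw text has to be mapped through the same word2id index (indices offset by 3 in the Keras IMDB encoding, with 1 as the start token and 2 for out-of-vocabulary words) and padded to max_words. The sample sentence is illustrative only and not part of the original listing.

# Hypothetical example: score one new review with the trained model
review = "this movie was really good and i enjoyed it"
ids = []
for w in review.lower().split():
    idx = word2id.get(w)
    # offset by 3 as in the Keras IMDB encoding; use 2 (the OOV token) for unknown/out-of-range words
    ids.append(idx + 3 if idx is not None and idx + 3 < vocabulary_size else 2)
encoded = sequence.pad_sequences([[1] + ids], maxlen=max_words)  # 1 is the start token
print("Positive probability:", float(model.predict(encoded)[0][0]))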
10. LSTM autoencoder
#ex10modified
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Input, LSTM, RepeatVector
from tensorflow.keras.models import Model
from sklearn.preprocessing import StandardScaler
# Load the data from 'JNJ.csv'
df = pd.read_csv('JNJ.csv')
data = df[['Close']].values  # Assuming 'Close' column contains the data of interest
scaler = StandardScaler()
scaled_data = scaler.fit_transform(data)
# Define input sequence length and dimensionality
seq_length = 10
input_dim = scaled_data.shape[1]
# Define LSTM units in the encoder
latent_dim = 3
# Function to create sequences from data
def create_sequences(data, seq_length):
    sequences = []
    for i in range(len(data) - seq_length + 1):
        sequence = data[i : i + seq_length]
        sequences.append(sequence)
    return np.array(sequences)
# Create sequences for training the autoencoder
X_train = create_sequences(scaled_data, seq_length)
# Define input layer
inputs = Input(shape=(seq_length, input_dim))
# Encoder
encoder = LSTM(latent_dim, return_sequences=False)(inputs)
encoded = RepeatVector(seq_length)(encoder)
# Decoder
decoder = LSTM(input_dim, return_sequences=True)(encoded)
# Define the autoencoder model
autoencoder = Model(inputs, decoder)
# Compile the model
autoencoder.compile(optimizer='adam', loss='mse')
# Train the autoencoder
autoencoder.fit(X_train, X_train, epochs=50, batch_size=32, validation_split=0.2)
# Generate predictions using the trained autoencoder
decoded_data = autoencoder.predict(X_train)
# Perform inverse scaling to get the original data back
decoded_data_original = scaler.inverse_transform(decoded_data.reshape(-1, input_dim))
# Show example output (first sequence) after training
print("Original Data:")
print(data[:seq_length])
print("\nReconstructed Data:")
print(decoded_data_original[:seq_length])
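Since the autoencoder is trained to reproduce its input, the per-sequence reconstruction error is a natural quality check (and a common basis for anomaly flags); a minimal sketch, not part of the original listing, using the arrays defined above:

# Mean squared reconstruction error per sequence (assumes X_train and decoded_data from above)
reconstruction_errors = np.mean(np.square(X_train - decoded_data), axis=(1, 2))
print("Mean reconstruction error:", reconstruction_errors.mean())
print("Worst-reconstructed sequence index:", int(np.argmax(reconstruction_errors)))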
11. Image Generation GAN
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Reshape, Flatten
from tensorflow.keras.optimizers import Adam
# Load MNIST dataset (handwritten digits)
(train_images, _), (_, _) = mnist.load_data()
# Normalize and reshape images
train_images = (train_images.astype('float32') - 127.5) / 127.5
train_images = np.expand_dims(train_images, axis=-1)
# Define generator model
generator = Sequential([
    Dense(256, input_shape=(100,), activation='relu'),
    Dense(512, activation='relu'),
    Dense(784, activation='tanh'),
    Reshape((28, 28, 1))])
# Define discriminator model
discriminator = Sequential([
    Flatten(input_shape=(28, 28, 1)),
    Dense(512, activation='relu'),
    Dense(256, activation='relu'),
    Dense(1, activation='sigmoid')])
discriminator.compile(optimizer=Adam(learning_rate=0.0002, beta_1=0.5),
                      loss='binary_crossentropy', metrics=['accuracy'])
# Define GAN model (generator -> discriminator)
gan = Sequential([generator, discriminator])
# Compile GAN (the discriminator is frozen here so that gan.train_on_batch updates only the generator)
discriminator.trainable = False
gan.compile(optimizer=Adam(learning_rate=0.0002, beta_1=0.5), loss='binary_crossentropy')
# Training parameters
epochs = 100
batch_size = 128
# Training loop
for epoch in range(epochs):
    noise = np.random.normal(0, 1, (batch_size, 100))
    # Generate fake images from noise using the generator
    fake_images = generator.predict(noise)
    # Sample real images from the training data
    idx = np.random.randint(0, train_images.shape[0], batch_size)
    real_images = train_images[idx]
    # Combine real and fake images into a single batch
    X = np.concatenate([real_images, fake_images])
    # Labels for discriminator (1 for real images, 0 for fake images)
    y_discriminator = np.zeros(2 * batch_size)
    y_discriminator[:batch_size] = 1
    # Train discriminator on the batch
    d_loss = discriminator.train_on_batch(X, y_discriminator)
    # Generate new noise for GAN training
    noise = np.random.normal(0, 1, (batch_size, 100))
    # Labels for GAN (1 to trick the discriminator)
    y_gan = np.ones(batch_size)
    # Train GAN (only updates the generator weights)
    g_loss = gan.train_on_batch(noise, y_gan)
    if epoch % 10 == 0:
        print(f'Epoch: {epoch}, Discriminator Loss: {d_loss[0]}, Generator Loss: {g_loss}')
# Generate and display some fake images
noise = np.random.normal(0, 1, (10, 100))
generated_images = generator.predict(noise)
plt.figure(figsize=(10, 10))
for i in range(10):
    plt.subplot(1, 10, i + 1)
    plt.imshow(generated_images[i].reshape(28, 28), cmap='gray')
    plt.axis('off')
plt.tight_layout()
plt.show()