
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

# Load the IMDB dataset, keeping the 10,000 most frequent words
max_features = 10000
maxlen = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Pad sequences to ensure uniform length
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)

# Build the LSTM model
model = Sequential([
    Embedding(max_features, 128, input_length=maxlen),
    LSTM(128),
    Dense(1, activation='sigmoid')
])

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, epochs=10, batch_size=32,
          validation_split=0.2)

# Evaluate the model
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test Accuracy: {test_acc:.2f}')

D:\DLLL\DLL\Lib\site-packages\keras\src\layers\core\embedding.py:90: UserWarning: Argument `input_length` is deprecated. Just remove it.
  warnings.warn(

Epoch 1/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 163s 258ms/step - accuracy: 0.6963 - loss: 0.5585 - val_accuracy: 0.7590 - val_loss: 0.4950
Epoch 2/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 181s 289ms/step - accuracy: 0.8687 - loss: 0.3289 - val_accuracy: 0.7824 - val_loss: 0.4621
Epoch 3/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 202s 323ms/step - accuracy: 0.8563 - loss: 0.3466 - val_accuracy: 0.8144 - val_loss: 0.4629
Epoch 4/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 209s 333ms/step - accuracy: 0.9179 - loss: 0.2130 - val_accuracy: 0.8636 - val_loss: 0.3667
Epoch 5/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 186s 297ms/step - accuracy: 0.9516 - loss: 0.1388 - val_accuracy: 0.8716 - val_loss: 0.3701
Epoch 6/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 170s 272ms/step - accuracy: 0.9719 - loss: 0.0874 - val_accuracy: 0.8712 - val_loss: 0.4377
Epoch 7/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 201s 269ms/step - accuracy: 0.9821 - loss: 0.0591 - val_accuracy: 0.8718 - val_loss: 0.5070
Epoch 8/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 182s 291ms/step - accuracy: 0.9894 - loss: 0.0364 - val_accuracy: 0.8634 - val_loss: 0.5404
Epoch 9/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 187s 299ms/step - accuracy: 0.9904 - loss: 0.0304 - val_accuracy: 0.8682 - val_loss: 0.6021
Epoch 10/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 191s 306ms/step - accuracy: 0.9943 - loss: 0.0199 - val_accuracy: 0.8616 - val_loss: 0.7191
782/782 ━━━━━━━━━━━━━━━━━━━━ 83s 105ms/step - accuracy: 0.8562 - loss: 0.7390
Test Accuracy: 0.86
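
The log above shows validation loss bottoming out around epoch 4 and climbing afterwards while training accuracy keeps rising, a classic overfitting pattern. A minimal sketch of cutting training short with Keras's EarlyStopping callback (the patience value is an illustrative assumption, not taken from the run above):

from tensorflow.keras.callbacks import EarlyStopping

# Stop once val_loss has not improved for 2 consecutive epochs and
# roll back to the weights from the best epoch seen so far
early_stop = EarlyStopping(monitor='val_loss', patience=2,
                           restore_best_weights=True)
model.fit(x_train, y_train, epochs=10, batch_size=32,
          validation_split=0.2, callbacks=[early_stop])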

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Generate a synthetic binary classification dataset
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2,
                           random_state=1)

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=1)

# Standardize the features
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Build and train the model with different optimizers; the model is
# rebuilt inside the loop so each optimizer starts from fresh weights
# rather than the weights left behind by the previous run
optimizers = ['sgd', 'adam', 'rmsprop']

for opt in optimizers:
    print(f'\nTraining with {opt} optimizer:')
    model = Sequential([
        Dense(64, input_dim=20, activation='relu'),
        Dense(64, activation='relu'),
        Dense(1, activation='sigmoid')
    ])
    model.compile(optimizer=opt, loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.fit(X_train, y_train, epochs=50, batch_size=10, verbose=0)
    loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
    print(f'Test Accuracy with {opt}: {accuracy:.2f}')

D:\DLLL\DLL\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)

Training with sgd optimizer:
Test Accuracy with sgd: 0.82

Training with adam optimizer:
Test Accuracy with adam: 0.80

Training with rmsprop optimizer:
Test Accuracy with rmsprop: 0.81
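
The string names above select each optimizer's default hyperparameters. Keras also accepts optimizer objects, which make the learning rate explicit; a minimal sketch (the learning-rate and momentum values are illustrative assumptions, not tuned for this dataset):

from tensorflow.keras.optimizers import SGD, Adam, RMSprop

# Named optimizer objects expose hyperparameters the string shortcuts hide
optimizers = [('sgd', SGD(learning_rate=0.01, momentum=0.9)),
              ('adam', Adam(learning_rate=0.001)),
              ('rmsprop', RMSprop(learning_rate=0.001))]

With this list, the loop header becomes for name, opt in optimizers: and the print statements use name in place of opt.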

import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

# Load the IMDB dataset
max_features = 10000
maxlen = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Pad sequences to ensure uniform length
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)

# Build the LSTM model
model = Sequential([
    Embedding(max_features, 128, input_length=maxlen),
    LSTM(128),
    Dense(1, activation='sigmoid')
])

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, epochs=10, batch_size=32,
          validation_split=0.2)

# Evaluate the model
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test Accuracy: {test_acc:.2f}')

Epoch 1/10

D:\DLLL\DLL\Lib\site-packages\keras\src\layers\core\embedding.py:90: UserWarning: Argument `input_length` is deprecated. Just remove it.
  warnings.warn(

625/625 ━━━━━━━━━━━━━━━━━━━━ 169s 267ms/step - accuracy: 0.7065 - loss: 0.5440 - val_accuracy: 0.6872 - val_loss: 0.5724
Epoch 2/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 183s 293ms/step - accuracy: 0.8552 - loss: 0.3388 - val_accuracy: 0.8298 - val_loss: 0.3970
Epoch 3/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 182s 291ms/step - accuracy: 0.9208 - loss: 0.2070 - val_accuracy: 0.8470 - val_loss: 0.3637
Epoch 4/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 181s 289ms/step - accuracy: 0.9508 - loss: 0.1445 - val_accuracy: 0.8616 - val_loss: 0.3800
Epoch 5/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 183s 293ms/step - accuracy: 0.9447 - loss: 0.1425 - val_accuracy: 0.8224 - val_loss: 0.4000
Epoch 6/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 200s 319ms/step - accuracy: 0.9508 - loss: 0.1319 - val_accuracy: 0.8790 - val_loss: 0.4194
Epoch 7/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 195s 308ms/step - accuracy: 0.9822 - loss: 0.0574 - val_accuracy: 0.8740 - val_loss: 0.4915
Epoch 8/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 183s 292ms/step - accuracy: 0.9894 - loss: 0.0340 - val_accuracy: 0.8678 - val_loss: 0.5030
Epoch 9/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 2564s 4s/step - accuracy: 0.9942 - loss: 0.0210 - val_accuracy: 0.8622 - val_loss: 0.5399
Epoch 10/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 175s 280ms/step - accuracy: 0.9969 - loss: 0.0116 - val_accuracy: 0.8708 - val_loss: 0.6336
782/782 ━━━━━━━━━━━━━━━━━━━━ 81s 104ms/step - accuracy: 0.8637 - loss: 0.6789
Test Accuracy: 0.87
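
To sanity-check what the model learned, a single review can be decoded back to words and scored. A minimal sketch using imdb.get_word_index() and model.predict; the offset of 3 accounts for the reserved indices (0 = padding, 1 = start, 2 = unknown) that load_data inserts by default:

# Build an index-to-word map; data indices are shifted by the
# three reserved tokens, hence the +3 offset
word_index = imdb.get_word_index()
reverse_index = {v + 3: k for k, v in word_index.items()}
decoded = ' '.join(reverse_index.get(i, '?') for i in x_test[0] if i > 2)
print(decoded[:200])

# Score the same padded review; the sigmoid output is P(positive)
prob = model.predict(x_test[:1])[0][0]
print(f'P(positive) = {prob:.2f}')
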
from sklearn.preprocessing import OneHotEncoder
import numpy as np

# Example categorical data
categories = np.array(['apple', 'banana', 'orange', 'banana',
                       'orange', 'apple']).reshape(-1, 1)

# Apply one-hot encoding
encoder = OneHotEncoder(sparse_output=False)
one_hot_encoded = encoder.fit_transform(categories)

print('Original Categories:', categories.flatten())
print('One-Hot Encoded:', one_hot_encoded)

Original Categories: ['apple' 'banana' 'orange' 'banana' 'orange' 'apple']
One-Hot Encoded: [[1. 0. 0.]
[0. 1. 0.]
[0. 0. 1.]
[0. 1. 0.]
[0. 0. 1.]
[1. 0. 0.]]
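
By default the encoder raises an error when asked to transform a category it never saw during fit. A minimal sketch of tolerating unseen values with handle_unknown='ignore' (the 'grape' value is an illustrative assumption):

# An unseen category encodes as an all-zero row instead of raising
encoder = OneHotEncoder(sparse_output=False, handle_unknown='ignore')
encoder.fit(categories)
print(encoder.transform(np.array([['grape']])))  # [[0. 0. 0.]]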

import cv2
from matplotlib import pyplot as plt

# Read the image; grayscale for detection, RGB for display
img = cv2.imread("image.jpg")
imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgrgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# Load the Haar cascade and detect stop signs in the grayscale image
stop_data = cv2.CascadeClassifier('stop_sign.xml')
found = stop_data.detectMultiScale(imggray, minSize=(20, 20))

amount_found = len(found)

# Draw a green rectangle around each detection
if amount_found != 0:
    for (x, y, width, height) in found:
        cv2.rectangle(imgrgb, (x, y), (x + width, y + height),
                      (0, 255, 0), 5)

plt.subplot(1, 1, 1)
plt.imshow(imgrgb)
plt.show()
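
Beyond minSize, detectMultiScale exposes two parameters that usually matter more for detection quality: scaleFactor (how much the image is scaled down between detection passes) and minNeighbors (how many overlapping candidate boxes a detection needs before it is kept). A minimal sketch with commonly used starting values; these numbers are assumptions to tune per image, not values from the run above:

# Smaller scaleFactor = finer scale search (slower, more candidates);
# larger minNeighbors = stricter filtering (fewer false positives)
found = stop_data.detectMultiScale(imggray, scaleFactor=1.1,
                                   minNeighbors=5, minSize=(20, 20))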
