Wildfire CNN: ~95% Test Accuracy

The document outlines a deep learning project focused on wildfire prediction using a convolutional neural network (CNN). It includes data loading, preprocessing, model building, training, and evaluation, with specific attention to handling image datasets and implementing callbacks for training. The model achieved a test accuracy of approximately 95.41% after training.


import os

import time
import shutil
import pathlib
import itertools
from PIL import Image
import random

# import data handling tools
import cv2
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
# import Deep learning Libraries
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam, Adamax
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Activation, Dropout, BatchNormalization
from tensorflow.keras import regularizers

# Ignore Warnings
import warnings
warnings.filterwarnings("ignore")

print ('modules loaded')

modules loaded

# Define the directories
train_dir = '/kaggle/input/wildfire-prediction-dataset/train'  # Replace with your train directory
valid_dir = '/kaggle/input/wildfire-prediction-dataset/valid'  # Replace with your validation directory
test_dir = '/kaggle/input/wildfire-prediction-dataset/test'    # Replace with your test directory

# Function to display one random sample from each class
def display_random_sample(dataset_dir):
    classes = os.listdir(dataset_dir)  # List of class directories
    plt.figure(figsize=(15, 10))  # Adjust the figure size if needed
    for i, class_name in enumerate(classes):
        class_dir = os.path.join(dataset_dir, class_name)
        image_files = os.listdir(class_dir)  # Get all images in the class folder
        random_image = random.choice(image_files)  # Select a random image
        img_path = os.path.join(class_dir, random_image)

        # Load the image with its original size
        img = load_img(img_path)

        # Display the image
        plt.subplot(1, len(classes), i + 1)  # Display images in a single row
        plt.imshow(img)
        plt.axis('off')  # Turn off axis for better visualization
        plt.title(f"{class_name} ({img.size[0]}x{img.size[1]})")  # Show class name and image size

    plt.show()

# Display one random sample from train, validation, and test sets
print("Training set samples:")
display_random_sample(train_dir)

print("Validation set samples:")
display_random_sample(valid_dir)

print("Test set samples:")
display_random_sample(test_dir)

Training set samples:


Validation set samples:

Test set samples:

dir = '/kaggle/input/wildfire-prediction-dataset/train'
x_train = []
y_train = []
for direct in os.listdir(dir):
    print("Loading dataset training {}".format(direct))
    for filename in os.listdir(os.path.join(dir, direct)):
        img_path = os.path.join(dir, direct, filename)
        img = cv2.imread(img_path)
        img = cv2.resize(img, (32, 32))
        img = np.array(img)
        img = img / 255
        x_train.append(img)
        y_train.append(direct)

Loading dataset training wildfire


Loading dataset training nowildfire

Premature end of JPEG file
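
The "Premature end of JPEG file" message comes from the JPEG decoder and indicates a truncated image; OpenCV typically still returns a partially decoded array, so the loop keeps going. If you want to locate such files before training, a minimal sketch (not part of the original notebook) using PIL, which is already imported above, could look like the following; find_corrupt_images is a hypothetical helper name:

def find_corrupt_images(dataset_dir):
    # Sketch only: fully decode each image; PIL raises an error on truncated or corrupt files.
    bad_files = []
    for class_name in os.listdir(dataset_dir):
        class_dir = os.path.join(dataset_dir, class_name)
        for filename in os.listdir(class_dir):
            path = os.path.join(class_dir, filename)
            try:
                with Image.open(path) as im:
                    im.load()  # forces a full decode, unlike a plain open
            except Exception:
                bad_files.append(path)
    return bad_files

# Example usage: print(find_corrupt_images(dir))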

dir_val = '/kaggle/input/wildfire-prediction-dataset/valid'
x_val = []
y_val = []
for direct in os.listdir(dir_val):
    print("Loading dataset validation {}".format(direct))
    for filename in os.listdir(os.path.join(dir_val, direct)):
        img_path = os.path.join(dir_val, direct, filename)
        image = cv2.imread(img_path)
        image = cv2.resize(image, (32, 32))
        image = np.array(image)
        image = image / 255
        x_val.append(image)
        y_val.append(direct)

Loading dataset validation wildfire


Loading dataset validation nowildfire

dir_test = '/kaggle/input/wildfire-prediction-dataset/test'
x_test = []
y_test = []
for direct in os.listdir(dir_test):
    print("Loading dataset test {}".format(direct))
    for filename in os.listdir(os.path.join(dir_test, direct)):
        img_path = os.path.join(dir_test, direct, filename)
        image = cv2.imread(img_path)
        image = cv2.resize(image, (32, 32))
        image = np.array(image)
        image = image / 255
        x_test.append(image)
        y_test.append(direct)

Loading dataset test wildfire

Premature end of JPEG file

Loading dataset test nowildfire


x_train = np.array(x_train)
x_val = np.array(x_val)
x_test = np.array(x_test)

y_train[30000]

'nowildfire'

# Replace "wildfire" with 1 and "nowildfire" with 0
y_train = [1 if label == 'wildfire' else 0 for label in y_train]
y_val = [1 if label == 'wildfire' else 0 for label in y_val]
y_test = [1 if label == 'wildfire' else 0 for label in y_test]

y_train = np.array(y_train)
y_val = np.array(y_val)
y_test = np.array(y_test)

len(x_train[4][4])

32

# Now check the shape of your datasets
print("x_train shape:", x_train.shape)  # Should be (num_samples, height, width, num_channels)
print("x_valid shape:", x_val.shape)
print("x_test shape:", x_test.shape)

print("y_train shape:", y_train.shape)
print("y_valid shape:", y_val.shape)
print("y_test shape:", y_test.shape)

x_train shape: (30250, 32, 32, 3)


x_valid shape: (6300, 32, 32, 3)
x_test shape: (6300, 32, 32, 3)
y_train shape: (30250,)
y_valid shape: (6300,)
y_test shape: (6300,)
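
With the labels encoded as 0/1, a quick class-balance check confirms that neither class dominates a split. This is a small sketch, not part of the original notebook, using the NumPy arrays built above:

# Count samples per class in each split (0 = nowildfire, 1 = wildfire)
for name, labels in [("train", y_train), ("valid", y_val), ("test", y_test)]:
    classes, counts = np.unique(labels, return_counts=True)
    print(name, dict(zip(classes.tolist(), counts.tolist())))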

# Step 1: Build the CNN model
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    MaxPooling2D((2, 2)),

    Conv2D(128, (3, 3), activation='relu', padding='same'),
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    MaxPooling2D((2, 2)),

    Conv2D(256, (3, 3), activation='relu', padding='same'),
    Conv2D(256, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    MaxPooling2D((2, 2)),

    Conv2D(128, (3, 3), activation='relu', padding='same'),
    Conv2D(128, (3, 3), activation='relu', padding='same'),

    Flatten(),

    Dense(128, activation='relu'),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dropout(0.5),

    Dense(64, activation='relu'),
    Dense(32, activation='relu'),
    BatchNormalization(),

    Dense(1, activation='sigmoid')  # Output layer: single sigmoid unit for binary classification
])

# Step 2: Compile the model
model.compile(optimizer='adam',
              loss='binary_crossentropy',  # Loss function for binary classification
              metrics=['accuracy'])

model.summary()

Model: "sequential_20"
┌──────────────────────────────────────────────┬──────────────────────┬───────────┐
│ Layer (type)                                 │ Output Shape         │   Param # │
├──────────────────────────────────────────────┼──────────────────────┼───────────┤
│ conv2d_133 (Conv2D)                          │ (None, 30, 30, 32)   │       896 │
│ conv2d_134 (Conv2D)                          │ (None, 30, 30, 64)   │    18,496 │
│ conv2d_135 (Conv2D)                          │ (None, 30, 30, 64)   │    36,928 │
│ batch_normalization_59 (BatchNormalization)  │ (None, 30, 30, 64)   │       256 │
│ max_pooling2d_42 (MaxPooling2D)              │ (None, 15, 15, 64)   │         0 │
│ conv2d_136 (Conv2D)                          │ (None, 15, 15, 128)  │    73,856 │
│ conv2d_137 (Conv2D)                          │ (None, 15, 15, 128)  │   147,584 │
│ batch_normalization_60 (BatchNormalization)  │ (None, 15, 15, 128)  │       512 │
│ max_pooling2d_43 (MaxPooling2D)              │ (None, 7, 7, 128)    │         0 │
│ conv2d_138 (Conv2D)                          │ (None, 7, 7, 256)    │   295,168 │
│ conv2d_139 (Conv2D)                          │ (None, 7, 7, 256)    │   590,080 │
│ batch_normalization_61 (BatchNormalization)  │ (None, 7, 7, 256)    │     1,024 │
│ max_pooling2d_44 (MaxPooling2D)              │ (None, 3, 3, 256)    │         0 │
│ conv2d_140 (Conv2D)                          │ (None, 3, 3, 128)    │   295,040 │
│ conv2d_141 (Conv2D)                          │ (None, 3, 3, 128)    │   147,584 │
│ flatten_20 (Flatten)                         │ (None, 1152)         │         0 │
│ dense_78 (Dense)                             │ (None, 128)          │   147,584 │
│ dense_79 (Dense)                             │ (None, 128)          │    16,512 │
│ batch_normalization_62 (BatchNormalization)  │ (None, 128)          │       512 │
│ dropout (Dropout)                            │ (None, 128)          │         0 │
│ dense_80 (Dense)                             │ (None, 64)           │     8,256 │
│ dense_81 (Dense)                             │ (None, 32)           │     2,080 │
│ batch_normalization_63 (BatchNormalization)  │ (None, 32)           │       128 │
│ dense_82 (Dense)                             │ (None, 1)            │        33 │
└──────────────────────────────────────────────┴──────────────────────┴───────────┘

Total params: 1,782,529 (6.80 MB)
Trainable params: 1,781,313 (6.80 MB)
Non-trainable params: 1,216 (4.75 KB)

from tensorflow.keras.callbacks import EarlyStopping

# Step 3: Train the model
history = model.fit(
    x_train, y_train,
    validation_data=(x_val, y_val),
    epochs=20,
    batch_size=64,
    callbacks=[EarlyStopping(monitor='val_loss', patience=4)]
)

Epoch 1/20
473/473 ━━━━━━━━━━━━━━━━━━━━ 20s 23ms/step - accuracy: 0.8795 - loss: 0.2910 - val_accuracy: 0.6083 - val_loss: 1.1794
Epoch 2/20
473/473 ━━━━━━━━━━━━━━━━━━━━ 6s 13ms/step - accuracy: 0.9252 - loss: 0.1913 - val_accuracy: 0.9024 - val_loss: 0.2299
Epoch 3/20
473/473 ━━━━━━━━━━━━━━━━━━━━ 6s 12ms/step - accuracy: 0.9321 - loss: 0.1734 - val_accuracy: 0.5735 - val_loss: 0.9520
Epoch 4/20
473/473 ━━━━━━━━━━━━━━━━━━━━ 6s 12ms/step - accuracy: 0.9377 - loss: 0.1574 - val_accuracy: 0.8211 - val_loss: 0.3225
Epoch 5/20
473/473 ━━━━━━━━━━━━━━━━━━━━ 6s 12ms/step - accuracy: 0.9442 - loss: 0.1464 - val_accuracy: 0.9543 - val_loss: 0.1255
Epoch 6/20
473/473 ━━━━━━━━━━━━━━━━━━━━ 6s 12ms/step - accuracy: 0.9477 - loss: 0.1359 - val_accuracy: 0.9540 - val_loss: 0.1189
Epoch 7/20
473/473 ━━━━━━━━━━━━━━━━━━━━ 6s 13ms/step - accuracy: 0.9456 - loss: 0.1413 - val_accuracy: 0.9365 - val_loss: 0.1692
Epoch 8/20
473/473 ━━━━━━━━━━━━━━━━━━━━ 6s 12ms/step - accuracy: 0.9511 - loss: 0.1278 - val_accuracy: 0.9525 - val_loss: 0.1287
Epoch 9/20
473/473 ━━━━━━━━━━━━━━━━━━━━ 6s 13ms/step - accuracy: 0.9564 - loss: 0.1186 - val_accuracy: 0.8160 - val_loss: 0.3986
Epoch 10/20
473/473 ━━━━━━━━━━━━━━━━━━━━ 6s 12ms/step - accuracy: 0.9560 - loss: 0.1141 - val_accuracy: 0.9478 - val_loss: 0.1521
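
The validation loss above oscillates between epochs, and EarlyStopping as configured keeps the weights from the last epoch it ran rather than the best one. A common variant, shown here as a hedged sketch and not part of the original run, is to restore the best weights and also checkpoint them to disk (the file name below is a placeholder):

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    # Stop when val_loss stops improving and roll back to the best epoch's weights
    EarlyStopping(monitor='val_loss', patience=4, restore_best_weights=True),
    # Save the best model seen so far (placeholder path/format)
    ModelCheckpoint('best_wildfire_cnn.keras', monitor='val_loss', save_best_only=True),
]

# history = model.fit(x_train, y_train, validation_data=(x_val, y_val),
#                     epochs=20, batch_size=64, callbacks=callbacks)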

# Evaluate the model on test data
test_loss, test_accuracy = model.evaluate(x_test, y_test)
print(f'Test Accuracy: {test_accuracy:.4f}')

197/197 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9470 - loss: 0.1800
Test Accuracy: 0.9541

# Plot training & validation accuracy values
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(loc='upper left')
plt.show()

# Plot training & validation loss values
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(loc='upper left')
plt.show()

# Predict on the datasets
y_train_pred = model.predict(x_train)
y_val_pred = model.predict(x_val)
y_test_pred = model.predict(x_test)

# Convert probabilities to binary classes if using a classification model
y_train_pred_classes = (y_train_pred > 0.5).astype("int32")
y_val_pred_classes = (y_val_pred > 0.5).astype("int32")
y_test_pred_classes = (y_test_pred > 0.5).astype("int32")

946/946 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step
197/197 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
197/197 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step

print("Confusion matrix for train: \n")


cm = confusion_matrix(y_train, y_train_pred_classes)
sns.heatmap(cm, annot=True)
plt.title(f"Confusion Matrix")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.show()

Confusion matrix for train:

from sklearn.metrics import classification_report

print(f"classification report for train : \n{classification_report(y_train, y_train_pred_classes)}")

classification report for train :
              precision    recall  f1-score   support

           0       0.95      0.96      0.96     14500
           1       0.97      0.95      0.96     15750

    accuracy                           0.96     30250
   macro avg       0.96      0.96      0.96     30250
weighted avg       0.96      0.96      0.96     30250

print("Confusion matrix for valid: \n")
cm = confusion_matrix(y_val, y_val_pred_classes)
sns.heatmap(cm, annot=True)
plt.title(f"Confusion Matrix")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.show()

Confusion matrix for valid:

print(f"classification report for valid : \


n{classification_report(y_val, y_val_pred_classes)}")

classification report for valid :


precision recall f1-score support

0 0.93 0.96 0.94 2820


1 0.97 0.94 0.95 3480

accuracy 0.95 6300


macro avg 0.95 0.95 0.95 6300
weighted avg 0.95 0.95 0.95 6300

print("Confusion matrix for test: \n")


cm = confusion_matrix(y_test, y_test_pred_classes)
sns.heatmap(cm, annot=True)
plt.title(f"Confusion Matrix")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.show()

Confusion matrix for test:

print(f"classification report for test : \


n{classification_report(y_test, y_test_pred_classes)}")

classification report for test :


precision recall f1-score support

0 0.93 0.97 0.95 2820


1 0.97 0.94 0.96 3480
accuracy 0.95 6300
macro avg 0.95 0.96 0.95 6300
weighted avg 0.95 0.95 0.95 6300
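
For completeness, here is how the trained model could be applied to a single image. This is a sketch only, not part of the original notebook; the path below is a placeholder, and the preprocessing mirrors what was done for the training arrays (image read with OpenCV, resized to 32x32, scaled to [0, 1]):

# Predict a single image (path is a placeholder)
img = cv2.imread('/kaggle/input/wildfire-prediction-dataset/test/wildfire/example.jpg')
img = cv2.resize(img, (32, 32)) / 255.0
prob = model.predict(np.expand_dims(img, axis=0))[0][0]
print('wildfire' if prob > 0.5 else 'nowildfire', f'(p={prob:.3f})')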
