Wildfires are a significant global phenomenon, responsible for large amounts of economic and environmental damage, and their effects are being exacerbated by climate change.¶
Detecting fires early and alerting the people in charge is therefore critical, so in this notebook we build a Fire Detection App with deep learning.¶
In [1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, callbacks, optimizers
import os
import random
from PIL import Image
Dataset¶
Dataset curated from Forest_fires_classification.¶
The following script was used to resize the images to 45x45 and compress the dataset:¶
import os, cv2, glob
import multiprocessing as mp

def resize(file_path):
    image = cv2.imread(file_path)
    image = cv2.resize(image, (45, 45))
    target_path = os.path.splitext(file_path)[0] + ".jpg"
    cv2.imwrite(target_path, image)
    print("Resized '{}' to '{}'".format(file_path, target_path))

def main():
    image_dir = "forest_fire_dataset"
    file_paths = []
    file_paths += glob.glob(image_dir + "/**/*.png", recursive=True)
    file_paths += glob.glob(image_dir + "/**/*.jpg", recursive=True)
    pool = mp.Pool(processes=mp.cpu_count())    # one worker per CPU core
    pool.map(resize, file_paths)
    pool.close()
    pool.join()

if __name__ == "__main__":
    main()
Downloading the curated Dataset¶
In [2]:
data_dir = "forest_fire"
if not os.path.exists(data_dir):
    !wget https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/forest_fire.zip -O forest_fire.zip
    !unzip -qo forest_fire.zip
    !rm forest_fire.zip
In [3]:
tf.random.set_seed(50)
The dataset folder has two sub-folders, one per class, containing fire and non-fire images respectively.
In [4]:
print("Number of samples")
for f in os.listdir(data_dir + '/'):
    if os.path.isdir(data_dir + '/' + f):
        print(f, " : ", len(os.listdir(data_dir + '/' + f + '/')))
It is a balanced dataset.
In [5]:
batch_size = 64
image_size = (45, 45)
print("Training set")
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    image_size=image_size,
    validation_split=0.2,
    subset="training",
    seed=113,
    batch_size=batch_size)
print("Validation set")
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    image_size=image_size,
    validation_split=0.2,
    subset="validation",
    seed=113,
    batch_size=batch_size)
Define the class_names for use later.
In [6]:
class_names = train_ds.class_names
print(class_names)
Visualization¶
In [7]:
num_samples = 10 # the number of samples to be displayed in each class
for x in class_names:
    plt.figure(figsize=(20, 20))
    filenames = os.listdir(os.path.join(data_dir, x))
    for i in range(num_samples):
        ax = plt.subplot(1, num_samples, i + 1)
        img = Image.open(os.path.join(data_dir, x, filenames[i]))
        plt.imshow(img)
        plt.title(x)
        plt.axis("off")
In [8]:
print("Shape of one training batch")
for image_batch, labels_batch in train_ds:
    input_shape = image_batch[0].shape
    print("Input: ", image_batch.shape)
    print("Labels: ", labels_batch.shape)
    print("Input Shape: ", input_shape)
    break
In [9]:
# Cache and pre-fetch images in memory for a faster input pipeline
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
Normalizing the pixel values¶
Pixel values are currently integers in the range 0-255. Rescaling them to [0, 1] helps the model converge faster.
In [10]:
# Normalizing the pixel values
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)
train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))
Augmenting dataset¶
Augmenting images in the training set increases the effective dataset size and can help the model generalize; a sketch of one way to do this is shown below.
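The original notebook does not include the augmentation code itself. A minimal sketch of one possible approach, using the same Keras preprocessing-layer API already used for rescaling (the specific layers and parameters below are assumptions, not taken from the original), is:
# Hypothetical augmentation pipeline (illustrative only, not part of the original notebook)
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip("horizontal"),
    layers.experimental.preprocessing.RandomRotation(0.1),
])

# If used, apply it to the training set only; the validation set stays unchanged:
# train_ds = train_ds.map(lambda x, y: (data_augmentation(x, training=True), y))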
The model¶
In [11]:
filepath = 'forest_fire.h5'
model = tf.keras.models.Sequential([
    layers.Conv2D(6, 3, activation='relu', input_shape=input_shape),
    layers.MaxPool2D(pool_size=(2, 2)),
    layers.Conv2D(12, 3, activation='relu'),
    layers.MaxPool2D(pool_size=(2, 2)),
    # layers.Conv2D(32, 3, activation='relu'),
    # layers.MaxPool2D(pool_size=(2, 2)),
    layers.Conv2D(6, 3, activation='relu'),
    layers.MaxPool2D(pool_size=(2, 2)),
    layers.Flatten(),
    layers.Dense(32, activation='relu'),
    layers.Dense(16, activation='relu'),
    layers.Dense(1, activation='sigmoid')
])
model.summary()
In [12]:
cb = [
    callbacks.EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True),
    callbacks.ModelCheckpoint(filepath, monitor="val_loss", save_best_only=True)
]

model.compile(loss=tf.keras.losses.BinaryCrossentropy(),
              optimizer=optimizers.Adam(0.0001),
              metrics=['accuracy'])

history = model.fit(train_ds, validation_data=val_ds, epochs=200, callbacks=cb)
In [13]:
model.evaluate(val_ds)
Out[13]:
Plotting the metrics¶
In [14]:
def plot(history, variable, variable2):
    plt.plot(range(len(history[variable])), history[variable])
    plt.plot(range(len(history[variable2])), history[variable2])
    plt.legend([variable, variable2])
    plt.title(variable)
In [15]:
plot(history.history, "accuracy", 'val_accuracy')
In [16]:
plot(history.history, "loss", "val_loss")
TF Model Prediction¶
In [17]:
def predict(x):
    for i in val_ds.as_numpy_iterator():
        img, label = i
        # plt.figure()
        # plt.axis('off')   # remove axes
        # plt.imshow(img[x])    # shape (64, 45, 45, 3) --> (45, 45, 3)
        print("True: ", class_names[label[x]])
        output = model.predict(np.expand_dims(img[x], 0))[0][0]    # getting output; input shape (45, 45, 3) --> (1, 45, 45, 3)
        pred = (output > 0.5).astype('int')
        print("Predicted: ", class_names[pred])    # picking the label from class_names based on the model output
        probability = (output * 100) if pred == 1 else (100 - (output * 100))
        if class_names[pred] == class_names[label[x]]:
            print("PREDICTION MATCHED | Probability: {:.2f}%".format(probability))
        else:
            print("PREDICTION DID NOT MATCH | Probability: {:.2f}%".format(probability))
        break
In [18]:
for i in range(10):
    # pick a random test sample from one batch
    x = random.randint(0, batch_size - 1)
    predict(x)
    print()
deepCC for Ubuntu¶
In [19]:
!deepCC forest_fire.h5 --debug
deepSea vs TF Model Prediction¶
In [20]:
def compare(x):
    for i in val_ds.as_numpy_iterator():
        img, label = i
        print("True: ", class_names[label[x]])
        tf_output = model.predict(np.expand_dims(img[x], 0))[0][0]    # getting output; input shape (45, 45, 3) --> (1, 45, 45, 3)
        tf_pred = (tf_output > 0.5).astype('int')
        tf_probability = (tf_output * 100) if tf_pred == 1 else (100 - (tf_output * 100))
        print("Predicted [TensorFlow]: {} | Probability: {:.2f}%".format(class_names[tf_pred], tf_probability))
        np.savetxt('sample.data', img[x].flatten())
        !forest_fire_deepC/forest_fire.exe sample.data &> /dev/null
        dc_output = np.loadtxt('deepSea_result_1.out')
        dc_pred = (dc_output > 0.5).astype('int')
        dc_probability = (dc_output * 100) if dc_pred == 1 else (100 - (dc_output * 100))
        print("Predicted [deepSea]: {} | Probability: {:.2f}%".format(class_names[dc_pred], dc_probability))
        if tf_pred == dc_pred:
            print("TensorFlow and deepSea predictions matched.")
        else:
            print("TensorFlow and deepSea predictions did not match.")
        break
In [21]:
for i in range(10):
    # pick a random test sample from one batch
    x = random.randint(0, batch_size - 1)
    compare(x)
    print()
deepCC for Arduino Nano 33 BLE Sense¶
In [22]:
!deepCC forest_fire.h5 --board="Arduino Nano 33 BLE Sense" --debug --archive --bundle