Cainvas

Elephant Detection App

Credit: AITS Cainvas Community

Photo by Lobster on Dribbble

Downloaded the dataset from GitHub. Credit - Human Wildlife Conflict.

!git clone "https://github.com/arribada/human-wildlife-conflict.git"

Viewing dataset

def view_dataset(path):
    print("Number of samples- ")
    for f in os.listdir(data_dir):
        curr_dir = os.path.join(data_dir, f)
        if os.path.isdir(curr_dir):
            print(f, "-", len(os.listdir(curr_dir)))

data_dir = 'human-wildlife-conflict/Elephant/Object/'
view_dataset(data_dir)
Number of samples- 
human_and_elephant - 807
human - 1502
multiple_obstructing_elephants - 2698
multiple_separate_elephants - 1858
goat - 114
single_elephant - 7615

We will now create a new dataset with classifications of elephant and non-elephant

dirs_containing_elephants = []
dirs_not_containing_elephants = []

for f in os.listdir(data_dir):
    curr_dir = os.path.join(data_dir, f)
    if os.path.isdir(curr_dir):
        if "elephant" in f:
            dirs_containing_elephants.append(curr_dir)
        else:
            dirs_not_containing_elephants.append(curr_dir)

elephant_dataset = "elephant_dataset"
elephant_dir = os.path.join(elephant_dataset, "elephant")
not_elephant_dir = os.path.join(elephant_dataset, "not_elephant")

if os.path.isdir(elephant_dataset):
    shutil.rmtree(elephant_dataset)
os.makedirs(elephant_dir)
os.makedirs(not_elephant_dir)

for dir in dirs_containing_elephants:
    for file in os.listdir(dir):
        shutil.copy2(os.path.join(dir, file), elephant_dir)

for dir in dirs_not_containing_elephants:
    for file in os.listdir(dir):
        shutil.copy2(os.path.join(dir, file), not_elephant_dir)

Let us recheck our dataset

data_dir = 'elephant_dataset/'
view_dataset(data_dir)
Number of samples- 
elephant - 12114
not_elephant - 1616

Downloading the above created dataset

In [1]:
!wget -N "https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/elephant_dataset.zip" -O elephant_dataset.zip
!unzip -qo elephant_dataset.zip
!rm elephant_dataset.zip
WARNING: timestamping does nothing in combination with -O. See the manual
for details.

--2021-07-12 20:12:29--  https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/elephant_dataset.zip
Resolving cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)... 52.219.160.23
Connecting to cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)|52.219.160.23|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 78090093 (74M) [application/zip]
Saving to: ‘elephant_dataset.zip’

elephant_dataset.zi 100%[===================>]  74.47M  94.9MB/s    in 0.8s    

2021-07-12 20:12:30 (94.9 MB/s) - ‘elephant_dataset.zip’ saved [78090093/78090093]

Import necessary libraries

In [2]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers, callbacks, optimizers
from sklearn.metrics import confusion_matrix, f1_score
from tensorflow import keras 
import os, shutil
import random
from PIL import Image
In [3]:
data_dir = 'elephant_dataset/'

batch_size = 64
# image_size = (32, 32)
image_size = (28, 28)

print("Training set")
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
                data_dir,
                validation_split=0.2,
                subset="training",
                color_mode="grayscale",
                image_size=image_size, 
                seed=113,
                shuffle=True,
                batch_size=batch_size
            )

print("Validation set")
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
                data_dir,
                validation_split=0.2,
                subset="validation",
                color_mode="grayscale",
                image_size=image_size, 
                seed=113,
                shuffle=True,
                batch_size=batch_size
            )
Training set
Found 13730 files belonging to 2 classes.
Using 10984 files for training.
Validation set
Found 13730 files belonging to 2 classes.
Using 2746 files for validation.
In [4]:
class_names = train_ds.class_names
print(class_names)
['elephant', 'not_elephant']
In [5]:
Xtrain = np.empty((0,*image_size,1))
ytrain = np.empty((0,1))

for x in train_ds.enumerate():
    for y in x[1][0]:  
        Xtrain = np.append(Xtrain, np.expand_dims(np.array(y),0), axis = 0)
    #print(Xtrain.shape)
    ytrain = np.append(ytrain, np.array(x[1][1]))
    #print(ytrain.shape)
    
Xtrain.shape, ytrain.shape
Out[5]:
((10984, 28, 28, 1), (10984,))

Augmenting images in the train set to increase dataset size

In [6]:
# # Augmenting images in the train set to increase dataset size

data_augmentation = tf.keras.Sequential(
    [
        layers.experimental.preprocessing.RandomFlip("horizontal"),    # Flip along vertical axes
        layers.experimental.preprocessing.RandomZoom(0.1),    # Randomly zoom images in dataset
    ])


print("Train size (number of samples) before augmentation: ", len(Xtrain))

aug_sample_count = ytrain.tolist().count(float(0.0))//2 - ytrain.tolist().count(float(1.0))
cur_augmented = 0
# Apply only to train set
while(cur_augmented!=aug_sample_count):
    for i in range(len(Xtrain)):
        if ytrain[i] == 1: # not elephant
            aug_image = np.array(data_augmentation(np.expand_dims(Xtrain[0], 0)))
            Xtrain = np.append(Xtrain, aug_image.reshape((1, *image_size, 1)), axis = 0)
            ytrain = np.append(ytrain, [1])
            
            cur_augmented += 1
            if (cur_augmented == aug_sample_count):
                break

    
print("Size (number of samples) of final dataset: ", len(Xtrain))

print(" Dataset shapes: ", Xtrain.shape, ytrain.shape)

#Adding to train_ds
#train_ds = train_ds.concatenate(aug_ds)

#print("Train size (number of batches) after augmentation: ", len(train_ds))
Train size (number of samples) before augmentation:  10984
WARNING:tensorflow:Layer random_flip is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2.  The layer has dtype float32 because its dtype defaults to floatx.

If you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.

To change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.

Size (number of samples) of final dataset:  14538
 Dataset shapes:  (14538, 28, 28, 1) (14538,)
In [7]:
print("Number of samples - ")
for i in range(len(class_names)):
    print(class_names[i], "-", ytrain.tolist().count(float(i)))
Number of samples - 
elephant - 9692
not_elephant - 4846
In [8]:
Xval = np.empty((0,*image_size,1))
yval = np.empty((0,1))

for x in val_ds.enumerate():
    for y in x[1][0]:  
        Xval = np.append(Xval, np.expand_dims(np.array(y),0), axis = 0)
    #print(Xtrain.shape)
    yval = np.append(yval, np.array(x[1][1]))
    #print(ytrain.shape)
    
Xval.shape, yval.shape
Out[8]:
((2746, 28, 28, 1), (2746,))
In [9]:
print("Number of samples - ")
for i in range(len(class_names)):
    print(class_names[i], "-", yval.tolist().count(float(i)))
Number of samples - 
elephant - 2422
not_elephant - 324

Visualization

In [10]:
num_samples = 4    # the number of samples to be displayed in each class

for x in class_names:
    plt.figure(figsize=(10, 10))

    filenames = os.listdir(data_dir + x)

    for i in range(num_samples):
        ax = plt.subplot(1, num_samples, i + 1)
        img = Image.open(os.path.join(data_dir, x, filenames[i]))
        plt.imshow(img)
        plt.title(x)
        plt.axis("off")

Normalizing the pixel values

Pixel values are now integers between 0 and 255. Changing them to the range [0, 1] for faster convergence.

In [11]:
Xtrain = Xtrain/255
Xval = Xval/255

The Model

In [12]:
model = keras.models.Sequential([
    layers.Conv2D(8, 3, activation='relu', input_shape=Xtrain[0].shape),
    layers.MaxPool2D(pool_size=(2, 2)),
    
    layers.Conv2D(16, 3, activation='relu'),
    layers.MaxPool2D(pool_size=(2, 2)),
    
    layers.Conv2D(32, 3, activation='relu'),
    layers.MaxPool2D(pool_size=(2, 2)),
    
    layers.Flatten(),
    layers.Dense(32, activation='relu'),
    layers.Dense(1, activation='sigmoid')
])

cb = [callbacks.EarlyStopping(monitor = 'val_loss', patience = 5, restore_best_weights = True)]
In [13]:
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 26, 26, 8)         80        
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 13, 13, 8)         0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 11, 11, 16)        1168      
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 5, 5, 16)          0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 3, 3, 32)          4640      
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 1, 1, 32)          0         
_________________________________________________________________
flatten (Flatten)            (None, 32)                0         
_________________________________________________________________
dense (Dense)                (None, 32)                1056      
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 33        
=================================================================
Total params: 6,977
Trainable params: 6,977
Non-trainable params: 0
_________________________________________________________________
In [14]:
model.compile(loss=keras.losses.BinaryCrossentropy(), optimizer=optimizers.Adam(0.0001), metrics=['accuracy'])

history = model.fit(Xtrain, ytrain, validation_data=(Xval, yval), epochs=300, callbacks=cb)
Epoch 1/300
455/455 [==============================] - 1s 2ms/step - loss: 0.5435 - accuracy: 0.6936 - val_loss: 0.3611 - val_accuracy: 0.8918
Epoch 2/300
455/455 [==============================] - 1s 2ms/step - loss: 0.3818 - accuracy: 0.8122 - val_loss: 0.3281 - val_accuracy: 0.8216
Epoch 3/300
455/455 [==============================] - 1s 2ms/step - loss: 0.3114 - accuracy: 0.8671 - val_loss: 0.3006 - val_accuracy: 0.8481
Epoch 4/300
455/455 [==============================] - 1s 2ms/step - loss: 0.2705 - accuracy: 0.8897 - val_loss: 0.2806 - val_accuracy: 0.8762
Epoch 5/300
455/455 [==============================] - 1s 2ms/step - loss: 0.2408 - accuracy: 0.9087 - val_loss: 0.2809 - val_accuracy: 0.8722
Epoch 6/300
455/455 [==============================] - 1s 2ms/step - loss: 0.2204 - accuracy: 0.9177 - val_loss: 0.2557 - val_accuracy: 0.8977
Epoch 7/300
455/455 [==============================] - 1s 2ms/step - loss: 0.2058 - accuracy: 0.9241 - val_loss: 0.2575 - val_accuracy: 0.8900
Epoch 8/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1929 - accuracy: 0.9266 - val_loss: 0.2318 - val_accuracy: 0.9122
Epoch 9/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1829 - accuracy: 0.9300 - val_loss: 0.2294 - val_accuracy: 0.9082
Epoch 10/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1726 - accuracy: 0.9321 - val_loss: 0.2183 - val_accuracy: 0.9133
Epoch 11/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1649 - accuracy: 0.9365 - val_loss: 0.2084 - val_accuracy: 0.9159
Epoch 12/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1561 - accuracy: 0.9393 - val_loss: 0.1955 - val_accuracy: 0.9221
Epoch 13/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1482 - accuracy: 0.9424 - val_loss: 0.1900 - val_accuracy: 0.9210
Epoch 14/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1406 - accuracy: 0.9448 - val_loss: 0.1818 - val_accuracy: 0.9319
Epoch 15/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1341 - accuracy: 0.9481 - val_loss: 0.1728 - val_accuracy: 0.9330
Epoch 16/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1270 - accuracy: 0.9519 - val_loss: 0.1695 - val_accuracy: 0.9381
Epoch 17/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1217 - accuracy: 0.9548 - val_loss: 0.1642 - val_accuracy: 0.9385
Epoch 18/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1169 - accuracy: 0.9571 - val_loss: 0.1554 - val_accuracy: 0.9436
Epoch 19/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1127 - accuracy: 0.9583 - val_loss: 0.1505 - val_accuracy: 0.9446
Epoch 20/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1075 - accuracy: 0.9614 - val_loss: 0.1480 - val_accuracy: 0.9468
Epoch 21/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1037 - accuracy: 0.9629 - val_loss: 0.1413 - val_accuracy: 0.9483
Epoch 22/300
455/455 [==============================] - 1s 2ms/step - loss: 0.1000 - accuracy: 0.9646 - val_loss: 0.1383 - val_accuracy: 0.9505
Epoch 23/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0979 - accuracy: 0.9647 - val_loss: 0.1345 - val_accuracy: 0.9523
Epoch 24/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0939 - accuracy: 0.9680 - val_loss: 0.1308 - val_accuracy: 0.9527
Epoch 25/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0915 - accuracy: 0.9673 - val_loss: 0.1279 - val_accuracy: 0.9559
Epoch 26/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0890 - accuracy: 0.9693 - val_loss: 0.1272 - val_accuracy: 0.9534
Epoch 27/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0880 - accuracy: 0.9698 - val_loss: 0.1224 - val_accuracy: 0.9581
Epoch 28/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0852 - accuracy: 0.9701 - val_loss: 0.1218 - val_accuracy: 0.9581
Epoch 29/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0832 - accuracy: 0.9721 - val_loss: 0.1171 - val_accuracy: 0.9581
Epoch 30/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0833 - accuracy: 0.9713 - val_loss: 0.1156 - val_accuracy: 0.9588
Epoch 31/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0795 - accuracy: 0.9738 - val_loss: 0.1272 - val_accuracy: 0.9588
Epoch 32/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0784 - accuracy: 0.9722 - val_loss: 0.1107 - val_accuracy: 0.9621
Epoch 33/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0781 - accuracy: 0.9729 - val_loss: 0.1118 - val_accuracy: 0.9599
Epoch 34/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0752 - accuracy: 0.9748 - val_loss: 0.1076 - val_accuracy: 0.9632
Epoch 35/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0737 - accuracy: 0.9752 - val_loss: 0.1060 - val_accuracy: 0.9636
Epoch 36/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0726 - accuracy: 0.9752 - val_loss: 0.1036 - val_accuracy: 0.9636
Epoch 37/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0714 - accuracy: 0.9766 - val_loss: 0.1036 - val_accuracy: 0.9661
Epoch 38/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0713 - accuracy: 0.9750 - val_loss: 0.1018 - val_accuracy: 0.9650
Epoch 39/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0695 - accuracy: 0.9769 - val_loss: 0.1032 - val_accuracy: 0.9672
Epoch 40/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0677 - accuracy: 0.9770 - val_loss: 0.1000 - val_accuracy: 0.9676
Epoch 41/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0673 - accuracy: 0.9766 - val_loss: 0.0973 - val_accuracy: 0.9683
Epoch 42/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0655 - accuracy: 0.9776 - val_loss: 0.0978 - val_accuracy: 0.9665
Epoch 43/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0669 - accuracy: 0.9777 - val_loss: 0.1030 - val_accuracy: 0.9639
Epoch 44/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0651 - accuracy: 0.9776 - val_loss: 0.0943 - val_accuracy: 0.9687
Epoch 45/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0635 - accuracy: 0.9776 - val_loss: 0.0931 - val_accuracy: 0.9701
Epoch 46/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0628 - accuracy: 0.9777 - val_loss: 0.0979 - val_accuracy: 0.9672
Epoch 47/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0623 - accuracy: 0.9784 - val_loss: 0.1001 - val_accuracy: 0.9647
Epoch 48/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0612 - accuracy: 0.9793 - val_loss: 0.0995 - val_accuracy: 0.9665
Epoch 49/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0604 - accuracy: 0.9790 - val_loss: 0.0900 - val_accuracy: 0.9712
Epoch 50/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0587 - accuracy: 0.9796 - val_loss: 0.0900 - val_accuracy: 0.9712
Epoch 51/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0595 - accuracy: 0.9791 - val_loss: 0.0904 - val_accuracy: 0.9680
Epoch 52/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0579 - accuracy: 0.9795 - val_loss: 0.0902 - val_accuracy: 0.9712
Epoch 53/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0578 - accuracy: 0.9798 - val_loss: 0.0906 - val_accuracy: 0.9716
Epoch 54/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0559 - accuracy: 0.9815 - val_loss: 0.0854 - val_accuracy: 0.9716
Epoch 55/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0557 - accuracy: 0.9801 - val_loss: 0.0849 - val_accuracy: 0.9720
Epoch 56/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0544 - accuracy: 0.9817 - val_loss: 0.0853 - val_accuracy: 0.9716
Epoch 57/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0539 - accuracy: 0.9816 - val_loss: 0.0844 - val_accuracy: 0.9716
Epoch 58/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0542 - accuracy: 0.9818 - val_loss: 0.0983 - val_accuracy: 0.9629
Epoch 59/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0531 - accuracy: 0.9828 - val_loss: 0.0846 - val_accuracy: 0.9712
Epoch 60/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0528 - accuracy: 0.9815 - val_loss: 0.0833 - val_accuracy: 0.9738
Epoch 61/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0514 - accuracy: 0.9828 - val_loss: 0.0815 - val_accuracy: 0.9745
Epoch 62/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0509 - accuracy: 0.9816 - val_loss: 0.0811 - val_accuracy: 0.9749
Epoch 63/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0513 - accuracy: 0.9819 - val_loss: 0.0806 - val_accuracy: 0.9734
Epoch 64/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0499 - accuracy: 0.9835 - val_loss: 0.0865 - val_accuracy: 0.9712
Epoch 65/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0503 - accuracy: 0.9818 - val_loss: 0.0817 - val_accuracy: 0.9749
Epoch 66/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0489 - accuracy: 0.9825 - val_loss: 0.0834 - val_accuracy: 0.9741
Epoch 67/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0485 - accuracy: 0.9836 - val_loss: 0.0865 - val_accuracy: 0.9716
Epoch 68/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0471 - accuracy: 0.9838 - val_loss: 0.0789 - val_accuracy: 0.9749
Epoch 69/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0471 - accuracy: 0.9839 - val_loss: 0.0811 - val_accuracy: 0.9741
Epoch 70/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0477 - accuracy: 0.9836 - val_loss: 0.0777 - val_accuracy: 0.9749
Epoch 71/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0473 - accuracy: 0.9828 - val_loss: 0.0797 - val_accuracy: 0.9745
Epoch 72/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0462 - accuracy: 0.9836 - val_loss: 0.0809 - val_accuracy: 0.9741
Epoch 73/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0463 - accuracy: 0.9826 - val_loss: 0.0778 - val_accuracy: 0.9760
Epoch 74/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0453 - accuracy: 0.9840 - val_loss: 0.0793 - val_accuracy: 0.9734
Epoch 75/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0451 - accuracy: 0.9847 - val_loss: 0.0765 - val_accuracy: 0.9763
Epoch 76/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0447 - accuracy: 0.9836 - val_loss: 0.0839 - val_accuracy: 0.9727
Epoch 77/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0452 - accuracy: 0.9836 - val_loss: 0.0760 - val_accuracy: 0.9767
Epoch 78/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0443 - accuracy: 0.9844 - val_loss: 0.0753 - val_accuracy: 0.9763
Epoch 79/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0436 - accuracy: 0.9840 - val_loss: 0.0781 - val_accuracy: 0.9749
Epoch 80/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0425 - accuracy: 0.9843 - val_loss: 0.0774 - val_accuracy: 0.9774
Epoch 81/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0431 - accuracy: 0.9849 - val_loss: 0.0744 - val_accuracy: 0.9767
Epoch 82/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0425 - accuracy: 0.9842 - val_loss: 0.0742 - val_accuracy: 0.9767
Epoch 83/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0414 - accuracy: 0.9847 - val_loss: 0.0768 - val_accuracy: 0.9774
Epoch 84/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0422 - accuracy: 0.9847 - val_loss: 0.0749 - val_accuracy: 0.9752
Epoch 85/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0403 - accuracy: 0.9860 - val_loss: 0.0738 - val_accuracy: 0.9778
Epoch 86/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0409 - accuracy: 0.9849 - val_loss: 0.0737 - val_accuracy: 0.9778
Epoch 87/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0419 - accuracy: 0.9849 - val_loss: 0.0775 - val_accuracy: 0.9763
Epoch 88/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0399 - accuracy: 0.9858 - val_loss: 0.0743 - val_accuracy: 0.9785
Epoch 89/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0395 - accuracy: 0.9856 - val_loss: 0.0733 - val_accuracy: 0.9771
Epoch 90/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0393 - accuracy: 0.9851 - val_loss: 0.0740 - val_accuracy: 0.9767
Epoch 91/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0400 - accuracy: 0.9849 - val_loss: 0.0754 - val_accuracy: 0.9789
Epoch 92/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0384 - accuracy: 0.9856 - val_loss: 0.0777 - val_accuracy: 0.9763
Epoch 93/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0391 - accuracy: 0.9850 - val_loss: 0.0773 - val_accuracy: 0.9752
Epoch 94/300
455/455 [==============================] - 1s 2ms/step - loss: 0.0390 - accuracy: 0.9856 - val_loss: 0.0734 - val_accuracy: 0.9771
In [15]:
model.evaluate(Xval, yval)
86/86 [==============================] - 0s 1ms/step - loss: 0.0733 - accuracy: 0.9771
Out[15]:
[0.07325273007154465, 0.9770575165748596]
In [16]:
ypred = (model.predict(Xval)>0.5).astype('int')
In [17]:
cm = confusion_matrix(yval, ypred)

cm = cm.astype('int') / cm.sum(axis=1)[:, np.newaxis]

fig = plt.figure(figsize = (4, 4))
ax = fig.add_subplot(111)

for i in range(cm.shape[1]):
    for j in range(cm.shape[0]):
        if cm[i,j] > 0.8:
            clr = "white"
        else:
            clr = "black"
        ax.text(j, i, format(cm[i, j], '.2f'), horizontalalignment="center", color=clr)

_ = ax.imshow(cm, cmap=plt.cm.Blues)
ax.set_xticks(range(len(class_names)))
ax.set_yticks(range(len(class_names)))
ax.set_xticklabels(class_names, rotation = 90)
ax.set_yticklabels(class_names)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
In [18]:
f1_score(yval, ypred, average = 'binary')
Out[18]:
0.9001584786053883

Plotting the metrics

In [19]:
def plot(history, variable1, variable2):
    plt.plot(range(len(history[variable1])), history[variable1])
    plt.plot(range(len(history[variable2])), history[variable2])
    plt.legend([variable1, variable2])
    plt.title(variable1)
In [20]:
plot(history.history, "accuracy", 'val_accuracy')
In [21]:
plot(history.history, "loss", 'val_loss')

Prediction

In [22]:
# pick random test data sample from one batch
x = random.randint(0, 32 - 1) # default batch size is 32

for i in val_ds.as_numpy_iterator():
    img, label = i    
    plt.axis('off')   # remove axes
    plt.imshow(img[x])    # shape from (64, 64, 64, 1) --> (64, 64, 1)
    output = model.predict(np.expand_dims(img[x],0))[0][0]    # getting output; input shape (64, 64, 3) --> (1, 64, 64, 1)
    pred = (output > 0.5).astype('int')
    print("Predicted: ", class_names[pred], '(', output, '-->', pred, ')')    # Picking the label from class_names base don the model output
    print("True: ", class_names[label[x]])
    break
Predicted:  elephant ( 0.0 --> 0 )
True:  elephant

deepC

In [23]:
model.save('elephant_classification.h5')

!deepCC elephant_classification.h5
[INFO]
Reading [keras model] 'elephant_classification.h5'
[SUCCESS]
Saved 'elephant_classification_deepC/elephant_classification.onnx'
[INFO]
Reading [onnx model] 'elephant_classification_deepC/elephant_classification.onnx'
[INFO]
Model info:
  ir_vesion : 5
  doc       : 
[WARNING]
[ONNX]: terminal (input/output) conv2d_input's shape is less than 1. Changing it to 1.
[WARNING]
[ONNX]: terminal (input/output) dense_1's shape is less than 1. Changing it to 1.
WARN (GRAPH): found operator node with the same name (dense_1) as io node.
[INFO]
Running DNNC graph sanity check ...
[SUCCESS]
Passed sanity check.
[INFO]
Writing C++ file 'elephant_classification_deepC/elephant_classification.cpp'
[INFO]
deepSea model files are ready in 'elephant_classification_deepC/' 
[RUNNING COMMAND]
g++ -std=c++11 -O3 -fno-rtti -fno-exceptions -I. -I/opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/include -isystem /opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/packages/eigen-eigen-323c052e1731 "elephant_classification_deepC/elephant_classification.cpp" -D_AITS_MAIN -o "elephant_classification_deepC/elephant_classification.exe"
[RUNNING COMMAND]
size "elephant_classification_deepC/elephant_classification.exe"
   text	   data	    bss	    dec	    hex	filename
 200797	   3760	    760	 205317	  32205	elephant_classification_deepC/elephant_classification.exe
[SUCCESS]
Saved model as executable "elephant_classification_deepC/elephant_classification.exe"