
Whale Detection Based on Aerial Images

Credit: AITS Cainvas Community

Photo by Lacey on Dribbble

This model can help drones identify whales when they surface.

In [1]:
#!wget -N "https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/Final_Whale_Dataset.zip" -O Final_Whale_Dataset.zip
#!unzip -qo Final_Whale_Dataset.zip
In [2]:
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from PIL import Image
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
In [3]:
Whales = os.listdir("Final Whale Dataset/Yes/")
Not_whales = os.listdir("Final Whale Dataset/No/")
In [4]:
Whales[:10]
Out[4]:
['w_9338.jpg',
 'w_9101.jpg',
 'w_5481.jpg',
 'w_1835.jpg',
 'w_10953.jpg',
 'w_8559.jpg',
 'w_10111.jpg',
 'w_10300.jpg',
 'w_10301.jpg',
 'w_10378.jpg']
In [5]:
Not_whales[:10]
Out[5]:
['Img-9933.jpg',
 'Img-8783.jpg',
 'Img-3923.jpg',
 'Img-7672.jpg',
 'Img-8859.jpg',
 'Img-9142.jpg',
 'Img-3396.jpg',
 'Img-11043.jpg',
 'Img-8806.jpg',
 'Img-10915.jpg']

Looking at the Dataset

Here, whale images form the positive class (label 1) and images of other animals form the negative class (label 0).
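
A quick class-balance check confirms both folders were read (a minimal sketch using the directory listings above):

print(f'WHALE IMAGES (POSITIVE CLASS) : {len(Whales)}')
print(f'NOT-WHALE IMAGES (NEGATIVE CLASS) : {len(Not_whales)}')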

In [6]:
plt.figure(figsize = (12,12))
for i in range(4):
    plt.subplot(1, 4, i+1)
    # cv2.imread returns BGR; convert to RGB so matplotlib shows true colors
    img = cv2.cvtColor(cv2.imread("Final Whale Dataset/Yes/" + Whales[i]), cv2.COLOR_BGR2RGB)
    plt.imshow(img)
    plt.title('Whale : 1')
    plt.tight_layout()
plt.show()
In [7]:
plt.figure(figsize = (12,12))
for i in range(4):
    plt.subplot(1, 4, i+1)
    # cv2.imread returns BGR; convert to RGB so matplotlib shows true colors
    img = cv2.cvtColor(cv2.imread('Final Whale Dataset/No/' + Not_whales[i+1]), cv2.COLOR_BGR2RGB)
    plt.imshow(img)
    plt.title('Not Whale : 0')
    plt.tight_layout()
plt.show()

Creating Training and Validation Sets

In [8]:
data = []
labels = []
for img in Whales:
    try:
        img_read = plt.imread('Final Whale Dataset/Yes/' + img)
        img_resize = cv2.resize(img_read, (50, 50))
        img_array = img_to_array(img_resize)
        data.append(img_array)
        labels.append(1)
    except Exception:
        pass  # skip unreadable or corrupt files

for img in Not_whales:
    try:
        img_read = plt.imread('Final Whale Dataset/No/' + img)
        img_resize = cv2.resize(img_read, (50, 50))
        img_array = img_to_array(img_resize)
        data.append(img_array)
        labels.append(0)
    except Exception:
        pass  # skip unreadable or corrupt files
In [9]:
image_data = np.array(data)
labels = np.array(labels)
In [10]:
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(image_data, labels, test_size = 0.33, random_state = 101)
In [11]:
x_train = x_train/255
x_test = x_test/255
In [12]:
y_train = to_categorical(y_train, num_classes = 2)
y_test = to_categorical(y_test, num_classes = 2)
In [13]:
print(f'SHAPE OF TRAINING IMAGE DATA : {x_train.shape}')
print(f'SHAPE OF TESTING IMAGE DATA : {x_test.shape}')
print(f'SHAPE OF TRAINING LABELS : {y_train.shape}')
print(f'SHAPE OF TESTING LABELS : {y_test.shape}')
SHAPE OF TRAINING IMAGE DATA : (14318, 50, 50, 3)
SHAPE OF TESTING IMAGE DATA : (7053, 50, 50, 3)
SHAPE OF TRAINING LABELS : (14318, 2)
SHAPE OF TESTING LABELS : (7053, 2)
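
For reference, to_categorical turns each integer label into a one-hot row: 0 (not whale) becomes [1, 0] and 1 (whale) becomes [0, 1], so column 1 of the model's softmax output holds the whale probability. A minimal illustration:

print(to_categorical([0, 1], num_classes = 2))
# [[1. 0.]
#  [0. 1.]]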

Creating the Model

In [14]:
from tensorflow.keras.layers import Dense, Conv2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import MaxPooling2D, GlobalAveragePooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras import backend as K

from tensorflow.keras import optimizers
In [15]:
def CNNbuild(height, width, classes, channels):
    model = Sequential()
    
    inputShape = (height, width, channels)
    chanDim = -1
    
    if K.image_data_format() == 'channels_first':
        inputShape = (channels, height, width)
    model.add(Conv2D(32, (3,3), activation = 'relu', input_shape = inputShape))
    model.add(MaxPooling2D(2,2))
    model.add(BatchNormalization(axis = chanDim))

    model.add(Conv2D(32, (3,3), activation = 'relu'))
    model.add(MaxPooling2D(2,2))
    model.add(BatchNormalization(axis = chanDim))

    model.add(Conv2D(32, (3,3), activation = 'relu'))
    model.add(MaxPooling2D(2,2))
    model.add(BatchNormalization(axis = chanDim))

    model.add(Flatten())
    
    model.add(Dense(8, activation = 'relu'))
    model.add(Dense(8, activation = 'relu'))
    model.add(Dense(8, activation = 'relu'))
    model.add(BatchNormalization(axis = chanDim))
    model.add(Dropout(0.5))
    model.add(Dense(classes, activation = 'softmax'))
    
    return model
In [16]:
height = 50
width = 50
classes = 2
channels = 3
model = CNNbuild(height = height, width = width, classes = classes, channels = channels)
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 48, 48, 32)        896       
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 24, 24, 32)        0         
_________________________________________________________________
batch_normalization (BatchNo (None, 24, 24, 32)        128       
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 22, 22, 32)        9248      
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 11, 11, 32)        0         
_________________________________________________________________
batch_normalization_1 (Batch (None, 11, 11, 32)        128       
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 9, 9, 32)          9248      
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 4, 4, 32)          0         
_________________________________________________________________
batch_normalization_2 (Batch (None, 4, 4, 32)          128       
_________________________________________________________________
flatten (Flatten)            (None, 512)               0         
_________________________________________________________________
dense (Dense)                (None, 8)                 4104      
_________________________________________________________________
dense_1 (Dense)              (None, 8)                 72        
_________________________________________________________________
dense_2 (Dense)              (None, 8)                 72        
_________________________________________________________________
batch_normalization_3 (Batch (None, 8)                 32        
_________________________________________________________________
dropout (Dropout)            (None, 8)                 0         
_________________________________________________________________
dense_3 (Dense)              (None, 2)                 18        
=================================================================
Total params: 24,074
Trainable params: 23,866
Non-trainable params: 208
_________________________________________________________________
In [17]:
model.compile(loss = 'categorical_crossentropy', optimizer = 'Adam', metrics = ['accuracy'])
In [18]:
h = model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs = 20, batch_size = 32)
Epoch 1/20
448/448 [==============================] - 2s 5ms/step - loss: 0.2155 - accuracy: 0.9220 - val_loss: 0.1067 - val_accuracy: 0.9607
Epoch 2/20
448/448 [==============================] - 2s 4ms/step - loss: 0.1003 - accuracy: 0.9728 - val_loss: 0.1514 - val_accuracy: 0.9429
Epoch 3/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0640 - accuracy: 0.9788 - val_loss: 0.0239 - val_accuracy: 0.9928
Epoch 4/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0607 - accuracy: 0.9805 - val_loss: 0.0228 - val_accuracy: 0.9918
Epoch 5/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0435 - accuracy: 0.9848 - val_loss: 0.3279 - val_accuracy: 0.9081
Epoch 6/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0523 - accuracy: 0.9822 - val_loss: 0.0131 - val_accuracy: 0.9957
Epoch 7/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0429 - accuracy: 0.9850 - val_loss: 0.0180 - val_accuracy: 0.9940
Epoch 8/20
448/448 [==============================] - 2s 5ms/step - loss: 0.0398 - accuracy: 0.9867 - val_loss: 0.0246 - val_accuracy: 0.9931
Epoch 9/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0391 - accuracy: 0.9846 - val_loss: 0.0384 - val_accuracy: 0.9891
Epoch 10/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0326 - accuracy: 0.9865 - val_loss: 0.0486 - val_accuracy: 0.9871
Epoch 11/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0376 - accuracy: 0.9857 - val_loss: 0.0275 - val_accuracy: 0.9906
Epoch 12/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0289 - accuracy: 0.9884 - val_loss: 0.3436 - val_accuracy: 0.9020
Epoch 13/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0302 - accuracy: 0.9878 - val_loss: 0.1364 - val_accuracy: 0.9522
Epoch 14/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0326 - accuracy: 0.9885 - val_loss: 0.0204 - val_accuracy: 0.9952
Epoch 15/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0271 - accuracy: 0.9893 - val_loss: 0.0101 - val_accuracy: 0.9967
Epoch 16/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0246 - accuracy: 0.9893 - val_loss: 0.0059 - val_accuracy: 0.9984
Epoch 17/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0278 - accuracy: 0.9879 - val_loss: 0.0052 - val_accuracy: 0.9986
Epoch 18/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0277 - accuracy: 0.9877 - val_loss: 0.0056 - val_accuracy: 0.9982
Epoch 19/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0242 - accuracy: 0.9892 - val_loss: 0.0286 - val_accuracy: 0.9918
Epoch 20/20
448/448 [==============================] - 2s 4ms/step - loss: 0.0309 - accuracy: 0.9867 - val_loss: 0.0102 - val_accuracy: 0.9973
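
The validation loss fluctuates between epochs (compare epochs 5, 12 and 13 with their neighbours), so keeping the best weights rather than the last ones can help. A sketch using standard Keras callbacks, not part of the run above (the checkpoint filename is hypothetical):

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    EarlyStopping(monitor = 'val_loss', patience = 5, restore_best_weights = True),
    ModelCheckpoint('Whales_best.h5', monitor = 'val_loss', save_best_only = True)  # hypothetical filename
]
# h = model.fit(x_train, y_train, validation_data = (x_test, y_test),
#               epochs = 20, batch_size = 32, callbacks = callbacks)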

Plotting the Training Curves

In [19]:
plt.figure(figsize = (15,5))
plt.plot(range(20), h.history['accuracy'], label = 'Training Accuracy')
plt.plot(range(20), h.history['loss'], label = 'Training Loss')
plt.plot(range(20), h.history['val_accuracy'], label = 'Validation Accuracy')
plt.xlabel('Number of Epochs')
plt.ylabel('Accuracy/Loss Value')
plt.title('Training Accuracy, Training Loss and Validation Accuracy')
plt.legend(loc = "best")
Out[19]:
<matplotlib.legend.Legend at 0x7fe85c310b00>
In [20]:
predictions = model.evaluate(x_test, y_test)
221/221 [==============================] - 0s 2ms/step - loss: 0.0102 - accuracy: 0.9973
In [21]:
print(f'LOSS : {predictions[0]}')
print(f'ACCURACY : {predictions[1]}')
LOSS : 0.010186842642724514
ACCURACY : 0.9973061084747314
In [22]:
model.save("Whales.h5")
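
The saved file can be reloaded later for inference without retraining (a minimal sketch):

from tensorflow.keras.models import load_model
reloaded = load_model("Whales.h5")   # restores the architecture, weights and optimizer state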

Prediction

In [23]:
plt.figure(figsize = (12,12))
img = cv2.cvtColor(cv2.imread("Final Whale Dataset/Yes/" + Whales[4]), cv2.COLOR_BGR2RGB)
img_resize = cv2.resize(img, (50, 50)) / 255              # same scaling as the training data
plt.imshow(img)
output = model.predict(np.expand_dims(img_resize, 0))[0]  # input shape (50, 50, 3) --> (1, 50, 50, 3)
print(output[1])                                          # probability of the whale class
pred = int(output[1] > 0.5)
print("Predicted: ", 'Whale' if pred == 1 else 'Not Whale', '(', output[1], '-->', pred, ')')
plt.title('Whale : 1')
plt.tight_layout()
1.0
Predicted:  Whale ( 1.0 --> 1 )
In [24]:
plt.figure(figsize = (12,12))
img = cv2.cvtColor(cv2.imread("Final Whale Dataset/Yes/" + Whales[6]), cv2.COLOR_BGR2RGB)
img_resize = cv2.resize(img, (50, 50)) / 255              # same scaling as the training data
plt.imshow(img)
output = model.predict(np.expand_dims(img_resize, 0))[0]  # input shape (50, 50, 3) --> (1, 50, 50, 3)
print(output[1])                                          # probability of the whale class
pred = int(output[1] > 0.5)
print("Predicted: ", 'Whale' if pred == 1 else 'Not Whale', '(', output[1], '-->', pred, ')')
plt.title('Whale : 1')
plt.tight_layout()
1.0
Predicted:  Whale ( 1.0 --> 1 )
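
The two prediction cells above repeat the same steps; a small helper consolidates them (a sketch assuming the same 50x50 RGB preprocessing and the [not whale, whale] output order used during training):

class_names = ['Not Whale', 'Whale']

def predict_image(path):
    # load, convert BGR -> RGB, then resize and scale exactly as the training data
    img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (50, 50)) / 255
    probs = model.predict(np.expand_dims(img, 0))[0]   # [p_not_whale, p_whale]
    return class_names[int(np.argmax(probs))]

print(predict_image("Final Whale Dataset/Yes/" + Whales[4]))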

DeepCC

In [ ]:
!deepCC