Cainvas

SPIDER BREED CLASSIFICATION

Import Libraries

In [1]:
!pip install wget
import numpy as np 
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
import cv2
from sklearn.model_selection import StratifiedShuffleSplit,train_test_split
import glob
import tensorflow as tf
from sklearn import metrics
from tensorflow import keras
!wget -N 'https://cainvas-static.s3.amazonaws.com/media/user_data/devanshchowd/spiders.tar.gz'
import tarfile
file = tarfile.open('spiders.tar.gz')
file.extractall('./')
file.close()
Defaulting to user installation because normal site-packages is not writeable
Requirement already satisfied: wget in ./.local/lib/python3.7/site-packages (3.2)
WARNING: You are using pip version 20.3.1; however, version 21.1.3 is available.
You should consider upgrading via the '/opt/tljh/user/bin/python -m pip install --upgrade pip' command.
--2021-07-04 12:21:22--  https://cainvas-static.s3.amazonaws.com/media/user_data/devanshchowd/spiders.tar.gz
Resolving cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)... 52.219.64.120
Connecting to cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)|52.219.64.120|:443... connected.
HTTP request sent, awaiting response... 304 Not Modified
File ‘spiders.tar.gz’ not modified on server. Omitting download.

Basic Analysis

In [2]:
# Quick count of images in each pre-made split directory.
print("Training Sample", len(glob.glob('spiders/train/*/*')),
      "Validation Sample", len(glob.glob('spiders/valid/*/*')),
      "Testing Sample", len(glob.glob('spiders/test/*/*')))
data = pd.read_csv('spiders/spiders.csv')
# Normalise Windows-style separators so the paths resolve on this Linux host.
data['filepaths'] = data['filepaths'].apply(lambda p: p.replace("\\", "/"))
# The top-level directory of each relative path doubles as its category.
data['category'] = data['filepaths'].apply(lambda p: p.split("/")[0])
sns.set_style('darkgrid')
sns.countplot(data=data, y='labels')
# Prefix every path with the extraction root so files can be opened directly.
base_dir = 'spiders'
data['filepaths'] = data['filepaths'].apply(lambda p: os.path.join(base_dir, p))
Training Sample 366 Validation Sample 10 Testing Sample 10
In [3]:
def get_img(index):
    """Load and return the image array for row `index` of the global `data` frame."""
    return plt.imread(data.iloc[index]['filepaths'])
# First five row indices of each of the two classes, for a 2x5 preview grid.
sample_rows = np.concatenate([
    np.array(data[data['labels'] == data['labels'].unique()[0]].index[:5]),
    np.array(data[data['labels'] == data['labels'].unique()[1]].index[:5]),
])
fig, axes = plt.subplots(2, 5, figsize=(15, 10))
plt.tick_params(left=False, bottom=False)
# axes.flat iterates row-major, matching the order of sample_rows.
for pos, axis in enumerate(axes.flat):
    axis.imshow(get_img(sample_rows[pos]))
    axis.tick_params(left=False, bottom=False)
    axis.set_xticks([])
    axis.set_yticks([])
    axis.set_title(data.iloc[sample_rows[pos]].labels)
fig.savefig('Sample.png')

Model

In [4]:
def get_model(dim=(224, 224, 3)):
    """Build, summarise, and return a small CNN with a 2-unit softmax head.

    Architecture: two 3x3 conv layers, then three repeated
    (5x5 conv -> 2x2 max-pool -> dropout) stages, flatten, dropout, dense(2).
    `dim` is the expected input image shape (height, width, channels).
    """
    keras.backend.clear_session()
    layers = [
        keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=dim),
        keras.layers.Conv2D(32, (3, 3), activation='relu', padding="valid"),
        keras.layers.MaxPooling2D((2, 2)),
        keras.layers.Dropout(.4),
    ]
    # Three structurally identical conv/pool/dropout stages, widening to 64 filters.
    for n_filters in (32, 64, 64):
        layers += [
            keras.layers.Conv2D(n_filters, (5, 5), activation='relu', padding="valid"),
            keras.layers.MaxPooling2D((2, 2)),
            keras.layers.Dropout(.4),
        ]
    layers += [
        keras.layers.Flatten(),
        keras.layers.Dropout(.4),
        keras.layers.Dense(2, activation='softmax'),
    ]
    model = keras.models.Sequential(layers)
    model.summary()
    return model
# Hold out 40 stratified samples for testing, then carve 20% of the rest for validation.
train_val, test = train_test_split(data, stratify=data['labels'], test_size=40, shuffle=True)
train, val = train_test_split(train_val, stratify=train_val['labels'], test_size=.2, shuffle=True)
# Fresh 0..n-1 indices on every split so positional lookups stay simple downstream.
for split_frame in (train, val, test):
    split_frame.reset_index(drop=True, inplace=True)
In [5]:
# Build the model once here just to display its architecture summary before training.
get_model()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 222, 222, 32)      896       
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 220, 220, 32)      9248      
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 110, 110, 32)      0         
_________________________________________________________________
dropout (Dropout)            (None, 110, 110, 32)      0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 106, 106, 32)      25632     
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 53, 53, 32)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 53, 53, 32)        0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 49, 49, 64)        51264     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 24, 24, 64)        0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 24, 24, 64)        0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 20, 20, 64)        102464    
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 10, 10, 64)        0         
_________________________________________________________________
dropout_3 (Dropout)          (None, 10, 10, 64)        0         
_________________________________________________________________
flatten (Flatten)            (None, 6400)              0         
_________________________________________________________________
dropout_4 (Dropout)          (None, 6400)              0         
_________________________________________________________________
dense (Dense)                (None, 2)                 12802     
=================================================================
Total params: 202,306
Trainable params: 202,306
Non-trainable params: 0
_________________________________________________________________
Out[5]:
<tensorflow.python.keras.engine.sequential.Sequential at 0x7f5fdf28e5c0>
In [6]:
# Augmentation (random flips) applies to the training set only; validation and test
# images must be deterministic, so they use the rescale-only generator.
datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1/255.,
                                  horizontal_flip = True,
                                  vertical_flip=True)
datagen_scale = keras.preprocessing.image.ImageDataGenerator(rescale=1/255)
train_generator= datagen.flow_from_dataframe(train,x_col ='filepaths',y_col='labels',target_size=(224,224),batch_size=64,shuffle=True)
# BUG FIX: these two previously used the augmenting `datagen`, randomly flipping
# validation/test images (the evaluation cell shows rescale-only was intended).
valid_generator= datagen_scale.flow_from_dataframe(val,x_col ='filepaths',y_col='labels',target_size=(224,224),shuffle=False)
test_generator = datagen_scale.flow_from_dataframe(test,x_col ='filepaths',y_col='labels',target_size=(224,224),shuffle=False)
Found 276 validated image filenames belonging to 2 classes.
Found 70 validated image filenames belonging to 2 classes.
Found 40 validated image filenames belonging to 2 classes.
In [7]:
model = get_model()
# The generators use class_mode='categorical' (flow_from_dataframe's default), yielding
# one-hot 2-class labels; categorical_crossentropy is the matching loss for the 2-unit
# softmax head (binary_crossentropy would average element-wise BCE over both outputs).
model.compile(loss='categorical_crossentropy',
              metrics='accuracy',
              optimizer=keras.optimizers.Adam(learning_rate=.001))
# Stop after 5 epochs without val_accuracy improvement and roll back to the best weights.
early_stop = keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)
red_lr = keras.callbacks.ReduceLROnPlateau(patience=3, monitor='val_accuracy', factor=0.75)
# Monitor the same metric as the other callbacks (the default would track val_loss).
model_check = keras.callbacks.ModelCheckpoint("cnn.h5", monitor='val_accuracy', save_best_only=True)
history = model.fit(train_generator, validation_data=valid_generator, epochs=200,
                    callbacks=[early_stop, model_check, red_lr])
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 222, 222, 32)      896       
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 220, 220, 32)      9248      
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 110, 110, 32)      0         
_________________________________________________________________
dropout (Dropout)            (None, 110, 110, 32)      0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 106, 106, 32)      25632     
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 53, 53, 32)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 53, 53, 32)        0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 49, 49, 64)        51264     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 24, 24, 64)        0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 24, 24, 64)        0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 20, 20, 64)        102464    
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 10, 10, 64)        0         
_________________________________________________________________
dropout_3 (Dropout)          (None, 10, 10, 64)        0         
_________________________________________________________________
flatten (Flatten)            (None, 6400)              0         
_________________________________________________________________
dropout_4 (Dropout)          (None, 6400)              0         
_________________________________________________________________
dense (Dense)                (None, 2)                 12802     
=================================================================
Total params: 202,306
Trainable params: 202,306
Non-trainable params: 0
_________________________________________________________________
Epoch 1/200
5/5 [==============================] - 2s 468ms/step - loss: 0.8332 - accuracy: 0.5217 - val_loss: 0.6919 - val_accuracy: 0.5000
Epoch 2/200
5/5 [==============================] - 1s 175ms/step - loss: 0.6861 - accuracy: 0.5652 - val_loss: 0.6918 - val_accuracy: 0.6000
Epoch 3/200
5/5 [==============================] - 1s 154ms/step - loss: 0.6754 - accuracy: 0.6377 - val_loss: 0.6855 - val_accuracy: 0.6857
Epoch 4/200
5/5 [==============================] - 1s 153ms/step - loss: 0.6499 - accuracy: 0.6558 - val_loss: 0.6612 - val_accuracy: 0.8000
Epoch 5/200
5/5 [==============================] - 1s 153ms/step - loss: 0.6145 - accuracy: 0.6449 - val_loss: 0.5762 - val_accuracy: 0.8143
Epoch 6/200
5/5 [==============================] - 1s 167ms/step - loss: 0.5068 - accuracy: 0.7717 - val_loss: 0.6041 - val_accuracy: 0.6714
Epoch 7/200
5/5 [==============================] - 1s 150ms/step - loss: 0.4733 - accuracy: 0.8080 - val_loss: 0.5213 - val_accuracy: 0.7857
Epoch 8/200
5/5 [==============================] - 1s 154ms/step - loss: 0.3993 - accuracy: 0.8297 - val_loss: 0.4088 - val_accuracy: 0.7857
Epoch 9/200
5/5 [==============================] - 1s 132ms/step - loss: 0.4123 - accuracy: 0.8732 - val_loss: 0.6390 - val_accuracy: 0.6143
Epoch 10/200
5/5 [==============================] - 1s 149ms/step - loss: 0.4850 - accuracy: 0.7536 - val_loss: 0.4740 - val_accuracy: 0.8714
Epoch 11/200
5/5 [==============================] - 1s 154ms/step - loss: 0.3220 - accuracy: 0.8696 - val_loss: 0.3999 - val_accuracy: 0.7857
Epoch 12/200
5/5 [==============================] - 1s 170ms/step - loss: 0.3926 - accuracy: 0.8225 - val_loss: 0.4561 - val_accuracy: 0.8857
Epoch 13/200
5/5 [==============================] - 1s 163ms/step - loss: 0.3464 - accuracy: 0.8804 - val_loss: 0.5047 - val_accuracy: 0.8286
Epoch 14/200
5/5 [==============================] - 1s 162ms/step - loss: 0.3005 - accuracy: 0.9022 - val_loss: 0.4032 - val_accuracy: 0.9000
Epoch 15/200
5/5 [==============================] - 1s 150ms/step - loss: 0.2535 - accuracy: 0.9275 - val_loss: 0.3457 - val_accuracy: 0.8571
Epoch 16/200
5/5 [==============================] - 1s 134ms/step - loss: 0.2390 - accuracy: 0.9130 - val_loss: 0.2935 - val_accuracy: 0.9143
Epoch 17/200
5/5 [==============================] - 1s 175ms/step - loss: 0.1898 - accuracy: 0.9348 - val_loss: 0.2786 - val_accuracy: 0.9000
Epoch 18/200
5/5 [==============================] - 1s 150ms/step - loss: 0.2092 - accuracy: 0.8949 - val_loss: 0.3686 - val_accuracy: 0.8286
Epoch 19/200
5/5 [==============================] - 1s 138ms/step - loss: 0.2598 - accuracy: 0.8841 - val_loss: 0.2464 - val_accuracy: 0.9286
Epoch 20/200
5/5 [==============================] - 1s 146ms/step - loss: 0.2266 - accuracy: 0.9312 - val_loss: 0.4563 - val_accuracy: 0.7571
Epoch 21/200
5/5 [==============================] - 1s 129ms/step - loss: 0.3922 - accuracy: 0.7754 - val_loss: 0.3613 - val_accuracy: 0.8857
Epoch 22/200
5/5 [==============================] - 1s 166ms/step - loss: 0.3182 - accuracy: 0.8587 - val_loss: 0.3991 - val_accuracy: 0.8429
Epoch 23/200
5/5 [==============================] - 1s 149ms/step - loss: 0.2299 - accuracy: 0.9312 - val_loss: 0.4060 - val_accuracy: 0.9000
Epoch 24/200
5/5 [==============================] - 1s 170ms/step - loss: 0.2035 - accuracy: 0.9275 - val_loss: 0.2731 - val_accuracy: 0.9286
In [8]:
# Training vs validation accuracy per epoch, using the explicit Axes interface.
fig, axis = plt.subplots()
axis.plot(history.history['accuracy'])
axis.plot(history.history['val_accuracy'])
axis.set_title('model accuracy')
axis.set_ylabel('accuracy')
axis.set_xlabel('epoch')
axis.legend(['train', 'validation'], loc='upper left')
plt.show()
fig.savefig('Accuracy vs Epoch.png')
In [9]:
# BUG FIX: previously no new figure was created in this cell, so `fig` still referenced
# the accuracy figure from the prior cell and 'Loss vs Epoch.png' saved the wrong plot.
fig = plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
fig.savefig('Loss vs Epoch.png')

EVALUATE

In [10]:
# Rebuild all three generators with rescaling only (no augmentation) for evaluation.
datagen_scale = keras.preprocessing.image.ImageDataGenerator(rescale=1/255)

def _scaled_flow(frame, **kwargs):
    """Rescale-only image generator over `frame`, reading 'filepaths'/'labels'."""
    return datagen_scale.flow_from_dataframe(frame, x_col='filepaths', y_col='labels',
                                             target_size=(224, 224), **kwargs)

train_generator = _scaled_flow(train, batch_size=64, shuffle=True)
valid_generator = _scaled_flow(val, shuffle=False)
test_generator = _scaled_flow(test, shuffle=False)
model.evaluate(train_generator)
model.evaluate(valid_generator)
model.evaluate(test_generator)
Found 276 validated image filenames belonging to 2 classes.
Found 70 validated image filenames belonging to 2 classes.
Found 40 validated image filenames belonging to 2 classes.
5/5 [==============================] - 0s 82ms/step - loss: 0.1631 - accuracy: 0.9529
3/3 [==============================] - 0s 16ms/step - loss: 0.2421 - accuracy: 0.9143
2/2 [==============================] - 0s 43ms/step - loss: 0.2049 - accuracy: 0.9000
Out[10]:
[0.20493611693382263, 0.8999999761581421]

PREDICT

In [11]:
# Pick the first five test rows of each class for a prediction showcase.
demo = np.append(np.array(test[test['labels'] == test['labels'].unique()[0]].index[:5]),
                 np.array(test[test['labels'] == test['labels'].unique()[1]].index[:5]))
demo = test.iloc[demo].reset_index(drop=False)
datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1/255)
test_generator = datagen.flow_from_dataframe(demo, x_col='filepaths', y_col='labels',
                                             target_size=(224, 224), shuffle=False)
# Invert class_indices ({label: index}) so integer argmax predictions map back to names.
index_to_label = {idx: name for name, idx in test_generator.class_indices.items()}
predicted = pd.Series(np.argmax(model.predict(test_generator), axis=1),
                      name='Prediction').map(index_to_label)
demo = pd.concat((demo, predicted), axis=1)
Found 10 validated image filenames belonging to 2 classes.
In [12]:
# 2x5 grid of demo images with ground-truth and predicted labels in each title.
fig, axes = plt.subplots(2, 5, figsize=(20, 10))
plt.tick_params(left=False, bottom=False)
# axes.flat iterates row-major, matching demo's row order.
for pos, axis in enumerate(axes.flat):
    row = demo.iloc[pos]
    axis.imshow(plt.imread(row.filepaths))
    axis.tick_params(left=False, bottom=False)
    axis.set_xticks([])
    axis.set_yticks([])
    axis.set_title("Truth Label "+row.labels+"   \n PREDICTED   "+row.Prediction)
fig.savefig('Predict.png')
In [13]:
# Persist the trained network (architecture + weights) in HDF5 format for later reuse.
model.save("Spider_Breed_Classification.h5")