Cainvas

Insect Classifier

Credit: AITS Cainvas Community

Photo by FM ILLUSTRATION on Dribbble

  • The model classifies insects into five classes: Butterfly, Dragonfly, Grasshopper, Ladybird, and Mosquito.
  • The dataset contains about 3.8k images of insects collected from different websites.

Import Libraries and Fetch Data

In [1]:
import tensorflow as tf
import cv2
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
In [2]:
!wget https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/archive_2.zip
In [3]:
!unzip -qo archive_2.zip
In [4]:
# Directory where images are present
main_dir = 'archive/insects'
num_fldrs = 5
In [5]:
# dictionary of labels
insect_names = {'1':"Butterfly",'2':"Dragonfly",
               '3':"Grasshopper",'4':"Ladybird",
               '5':"Mosquito"}

Create a DataFrame for reference purposes.

In [6]:
def getdata(folder_path, num_subfolders):
    global insect_names
    rows = []
    for label in range(1, num_subfolders + 1):
        # directory for this label, e.g. archive/insects/Butterfly
        label_i = folder_path + "/" + insect_names[str(label)]
        # read directory
        dirs_label_i = os.listdir(label_i)
        for image in dirs_label_i:
            # create an absolute image path
            insect_i = os.path.join(label_i, image)
            # collect path and label (DataFrame.append is deprecated in newer
            # pandas, so rows are gathered in a list and converted once)
            rows.append({'image_abs_path': insect_i,
                         'image_labels': insect_names[str(label)]})
    insects = pd.DataFrame(rows, columns=['image_abs_path', 'image_labels'])
    return insects
In [7]:
train = getdata(main_dir,num_fldrs)
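
A quick sanity check on the resulting dataframe (a minimal sketch, not part of the original run):

# Sketch: peek at the reference dataframe built above
print(train.shape)    # (number of images, 2)
print(train.head())   # first few absolute paths with their labels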

Visualise Images

In [8]:
# A function to fetch and display a single image (read as grayscale) from the given path
def get_image(path):
    img = cv2.imread(path, 0)       # 0 -> load as grayscale
    plt.imshow(img, cmap='gray')
In [9]:
# A function to fetch n images of a given label (e.g. 5 Dragonfly images) from a given dataframe
def get_n_images(n, df, label):
    import warnings
    warnings.filterwarnings('ignore')
    train = df[df["image_labels"] == label]
    print(len(train))
    i = 0
    m = (n + 1) // 2  # columns per row; integer, as subplot() expects
    plt.figure(figsize=(12, 6))
    for path in train['image_abs_path'][0:n]:
        plt.subplot(2, m, i + 1)
        get_image(path)
        i += 1
    plt.tight_layout()
    plt.show()
In [10]:
get_n_images(10,train,"Dragonfly")
1036

Check Proportion of Insects

In [11]:
def plotHbar(df,labels):
    numbers = []
    label = list(labels.values())
    for i in label:
        numbers.append(len(df[df['image_labels']==i]))
    plt.figure(figsize=(5,2))
    plt.barh(label,numbers,align='center',color='green',)
    plt.title("Label Counts")
    plt.show()
In [12]:
plotHbar(train,insect_names)
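
For exact counts rather than a bar chart, pandas' value_counts gives the same proportions numerically (a small sketch, not part of the original run):

# Sketch: exact per-class counts backing the bar chart above
print(train['image_labels'].value_counts())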

Build Image Data Generator to fetch images directly from the main directory

In [13]:
def datapreprocessing(main_dir,bsize):
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    
    train_gen = ImageDataGenerator(rescale=1.0/255,
                                  validation_split=0.30,
                                  rotation_range=40,
                                  horizontal_flip=True,
                                  fill_mode='nearest')

    train_generator = train_gen.flow_from_directory(
        directory=main_dir,
        target_size=(150,150),
        batch_size=bsize,
        color_mode="rgb",
        shuffle=True,
        subset="training",
        class_mode='categorical')
    
    valid_generator = train_gen.flow_from_directory(
        directory=main_dir,
        target_size=(150,150),
        batch_size=bsize,
        color_mode="rgb",
        shuffle=True,
        subset="validation",
        class_mode='categorical')
    
    return train_generator, valid_generator
In [14]:
# Build train and validation sets
traingen,validgen = datapreprocessing(main_dir,20)
Found 3116 images belonging to 5 classes.
Found 1333 images belonging to 5 classes.
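
flow_from_directory infers the labels from the subfolder names; a quick way to confirm the class-to-index mapping (a sketch, assuming the generators built above are still in the session):

# Sketch: label mapping and split sizes of the generators built above
print(traingen.class_indices)              # e.g. {'Butterfly': 0, 'Dragonfly': 1, ...}
print(traingen.samples, validgen.samples)  # 3116 and 1333 images respectively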

Visualise Images in Generator

In [15]:
def visualize_gen(train_generator):   
    #Visualising Images Processed
    plt.figure(figsize=(12, 6))
    for i in range(0, 10):
        plt.subplot(2, 5, i+1)
        for X_batch, Y_batch in train_generator:
            image = X_batch[0]        
            plt.axis("off")
            plt.imshow((image*255).astype(np.uint8))
            break
    plt.tight_layout()
    plt.show()
In [16]:
visualize_gen(traingen)

Build Model Architecture

In [17]:
def insectclf(input_shape):
    from tensorflow import keras as ks
    #from tensorflow.keras import regularizers
    model = ks.models.Sequential()
    #building architecture
    #Adding layers
    model.add(ks.layers.Conv2D(16,(3,3),
                               strides=1,
                               activation="relu",
                               padding='valid',
                               name="layer1",
                               input_shape=input_shape))
    model.add(ks.layers.MaxPooling2D(pool_size=4))
    model.add(ks.layers.Dropout(0.3))
    model.add(ks.layers.Conv2D(32,(3,3),strides=1,padding="valid",activation="relu",name="layer2"))
    model.add(ks.layers.MaxPooling2D(pool_size=2))
    model.add(ks.layers.Dropout(0.3))
    model.add(ks.layers.Conv2D(64,(3,3),strides=1,padding="valid",activation="relu",name="layer3"))
    model.add(ks.layers.MaxPooling2D(pool_size=2))
    model.add(ks.layers.Dropout(0.3))
    model.add(ks.layers.Conv2D(128,(3,3),strides=1,padding="valid",activation="relu",name="layer4"))
    model.add(ks.layers.MaxPooling2D(pool_size=2))
    model.add(ks.layers.Dropout(0.3))
    
    model.add(ks.layers.Flatten())
    model.add(ks.layers.Dense(128,activation="relu",
                              name="layer7"))
    model.add(ks.layers.Dropout(0.5))
    model.add(ks.layers.Dense(128,activation="relu",
                              name="layer8"))
    model.add(ks.layers.Dropout(0.5))
    
    model.add(ks.layers.Dense(5,activation="softmax",
                              name="output"))#20 classes 
    model.summary()
    
    return model
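
For reference, the 512-unit Flatten width in the summary further below follows from the four valid-padding convolutions and poolings; a tiny check (a sketch, not part of the original notebook):

# Sketch: trace the spatial size through the conv/pool stack for a 150x150 input
size = 150
for k, p in [(3, 4), (3, 2), (3, 2), (3, 2)]:  # (conv kernel, pool size) per block
    size = (size - k + 1) // p                 # 'valid' convolution, then max-pooling
print(size, size * size * 128)                 # -> 2 and 512, matching the Flatten layer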

Build Model Compiler and Trainer

In [18]:
def compiler(model,train_generator,valid_generator,epchs,bsize,lr=0.0001):

    from tensorflow import keras as ks
    callbck = ks.callbacks.EarlyStopping(monitor='val_loss',patience=20,
                                         verbose=2,restore_best_weights=True) 
    #red_lr= ReduceLROnPlateau(monitor='val_acc',patience=3,verbose=1,factor=0.1)
    opt = ks.optimizers.Adam(learning_rate=lr)
    
    model.compile(loss="categorical_crossentropy",
                      optimizer=opt,
                      metrics=["accuracy"])
    history = model.fit(train_generator,
                        epochs=epchs,
                        callbacks=[callbck],
                        validation_data=valid_generator,
                        verbose = 1,
                        steps_per_epoch = train_generator.n//bsize,
                        validation_steps = valid_generator.n//bsize
                        )
    #Visualise curves
    plt.plot(history.history['accuracy'], label='train_acc')
    plt.plot(history.history['val_accuracy'], label='valid_acc')

    plt.title('lrate='+str(lr), pad=-50)
    plt.legend()
    plt.grid(True)
    return model,history
In [19]:
# Get input shape
input_shape = traingen.image_shape
input_shape
Out[19]:
(150, 150, 3)

Fit and Evaluate

In [20]:
model01 = insectclf(input_shape)
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
layer1 (Conv2D)              (None, 148, 148, 16)      448       
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 37, 37, 16)        0         
_________________________________________________________________
dropout (Dropout)            (None, 37, 37, 16)        0         
_________________________________________________________________
layer2 (Conv2D)              (None, 35, 35, 32)        4640      
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 17, 17, 32)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 17, 17, 32)        0         
_________________________________________________________________
layer3 (Conv2D)              (None, 15, 15, 64)        18496     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 7, 7, 64)          0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 7, 7, 64)          0         
_________________________________________________________________
layer4 (Conv2D)              (None, 5, 5, 128)         73856     
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 2, 2, 128)         0         
_________________________________________________________________
dropout_3 (Dropout)          (None, 2, 2, 128)         0         
_________________________________________________________________
flatten (Flatten)            (None, 512)               0         
_________________________________________________________________
layer7 (Dense)               (None, 128)               65664     
_________________________________________________________________
dropout_4 (Dropout)          (None, 128)               0         
_________________________________________________________________
layer8 (Dense)               (None, 128)               16512     
_________________________________________________________________
dropout_5 (Dropout)          (None, 128)               0         
_________________________________________________________________
output (Dense)               (None, 5)                 645       
=================================================================
Total params: 180,261
Trainable params: 180,261
Non-trainable params: 0
_________________________________________________________________
In [21]:
model01 = compiler(model01,traingen,validgen,100,bsize=32,lr=0.001)
Epoch 1/100
97/97 [==============================] - 16s 166ms/step - loss: 1.6111 - accuracy: 0.2319 - val_loss: 1.5970 - val_accuracy: 0.3122
Epoch 2/100
97/97 [==============================] - 16s 165ms/step - loss: 1.4949 - accuracy: 0.3381 - val_loss: 1.4846 - val_accuracy: 0.3768
Epoch 3/100
97/97 [==============================] - 16s 166ms/step - loss: 1.4234 - accuracy: 0.3897 - val_loss: 1.4371 - val_accuracy: 0.3915
Epoch 4/100
97/97 [==============================] - 16s 166ms/step - loss: 1.3957 - accuracy: 0.3900 - val_loss: 1.3906 - val_accuracy: 0.4256
Epoch 5/100
97/97 [==============================] - 16s 165ms/step - loss: 1.3378 - accuracy: 0.4402 - val_loss: 1.4182 - val_accuracy: 0.3817
Epoch 6/100
97/97 [==============================] - 16s 165ms/step - loss: 1.3306 - accuracy: 0.4282 - val_loss: 1.3514 - val_accuracy: 0.4537
Epoch 7/100
97/97 [==============================] - 16s 164ms/step - loss: 1.3242 - accuracy: 0.4278 - val_loss: 1.4304 - val_accuracy: 0.3963
Epoch 8/100
97/97 [==============================] - 16s 164ms/step - loss: 1.3290 - accuracy: 0.4380 - val_loss: 1.4269 - val_accuracy: 0.4159
Epoch 9/100
97/97 [==============================] - 16s 165ms/step - loss: 1.2777 - accuracy: 0.4711 - val_loss: 1.3816 - val_accuracy: 0.4134
Epoch 10/100
97/97 [==============================] - 16s 163ms/step - loss: 1.2711 - accuracy: 0.4665 - val_loss: 1.3840 - val_accuracy: 0.3976
Epoch 11/100
97/97 [==============================] - 16s 164ms/step - loss: 1.2526 - accuracy: 0.4866 - val_loss: 1.3307 - val_accuracy: 0.4427
Epoch 12/100
97/97 [==============================] - 16s 164ms/step - loss: 1.2372 - accuracy: 0.4959 - val_loss: 1.3203 - val_accuracy: 0.4634
Epoch 13/100
97/97 [==============================] - 16s 165ms/step - loss: 1.2364 - accuracy: 0.4851 - val_loss: 1.2928 - val_accuracy: 0.4805
Epoch 14/100
97/97 [==============================] - 16s 164ms/step - loss: 1.2125 - accuracy: 0.4969 - val_loss: 1.3084 - val_accuracy: 0.4634
Epoch 15/100
97/97 [==============================] - 16s 167ms/step - loss: 1.2067 - accuracy: 0.5026 - val_loss: 1.3058 - val_accuracy: 0.4780
Epoch 16/100
97/97 [==============================] - 16s 164ms/step - loss: 1.2022 - accuracy: 0.5279 - val_loss: 1.2533 - val_accuracy: 0.4951
Epoch 17/100
97/97 [==============================] - 16s 165ms/step - loss: 1.1782 - accuracy: 0.5155 - val_loss: 1.2766 - val_accuracy: 0.4915
Epoch 18/100
97/97 [==============================] - 16s 164ms/step - loss: 1.1579 - accuracy: 0.5361 - val_loss: 1.3191 - val_accuracy: 0.4537
Epoch 19/100
97/97 [==============================] - 16s 164ms/step - loss: 1.1761 - accuracy: 0.5191 - val_loss: 1.3709 - val_accuracy: 0.4488
Epoch 20/100
97/97 [==============================] - 16s 165ms/step - loss: 1.1228 - accuracy: 0.5486 - val_loss: 1.4135 - val_accuracy: 0.4329
Epoch 21/100
97/97 [==============================] - 16s 164ms/step - loss: 1.1576 - accuracy: 0.5253 - val_loss: 1.1797 - val_accuracy: 0.5244
Epoch 22/100
97/97 [==============================] - 16s 165ms/step - loss: 1.1269 - accuracy: 0.5521 - val_loss: 1.2269 - val_accuracy: 0.5256
Epoch 23/100
97/97 [==============================] - 16s 165ms/step - loss: 1.1459 - accuracy: 0.5553 - val_loss: 1.1690 - val_accuracy: 0.5207
Epoch 24/100
97/97 [==============================] - 16s 165ms/step - loss: 1.0922 - accuracy: 0.5491 - val_loss: 1.1214 - val_accuracy: 0.5512
Epoch 25/100
97/97 [==============================] - 16s 166ms/step - loss: 1.1222 - accuracy: 0.5439 - val_loss: 1.1434 - val_accuracy: 0.5537
Epoch 26/100
97/97 [==============================] - 16s 164ms/step - loss: 1.0741 - accuracy: 0.5737 - val_loss: 1.1065 - val_accuracy: 0.5817
Epoch 27/100
97/97 [==============================] - 16s 164ms/step - loss: 1.0891 - accuracy: 0.5646 - val_loss: 1.1656 - val_accuracy: 0.5415
Epoch 28/100
97/97 [==============================] - 16s 164ms/step - loss: 1.0588 - accuracy: 0.5795 - val_loss: 1.2878 - val_accuracy: 0.4890
Epoch 29/100
97/97 [==============================] - 16s 166ms/step - loss: 1.0532 - accuracy: 0.5861 - val_loss: 1.2436 - val_accuracy: 0.5280
Epoch 30/100
97/97 [==============================] - 16s 164ms/step - loss: 1.0431 - accuracy: 0.5770 - val_loss: 1.0921 - val_accuracy: 0.5756
Epoch 31/100
97/97 [==============================] - 16s 164ms/step - loss: 1.0399 - accuracy: 0.5925 - val_loss: 1.2592 - val_accuracy: 0.5073
Epoch 32/100
97/97 [==============================] - 16s 164ms/step - loss: 1.0513 - accuracy: 0.6018 - val_loss: 1.1170 - val_accuracy: 0.5732
Epoch 33/100
97/97 [==============================] - 16s 165ms/step - loss: 1.0634 - accuracy: 0.5945 - val_loss: 1.1300 - val_accuracy: 0.5451
Epoch 34/100
97/97 [==============================] - 16s 164ms/step - loss: 1.0307 - accuracy: 0.6046 - val_loss: 1.1856 - val_accuracy: 0.5305
Epoch 35/100
97/97 [==============================] - 16s 165ms/step - loss: 1.0445 - accuracy: 0.5997 - val_loss: 1.1683 - val_accuracy: 0.5524
Epoch 36/100
97/97 [==============================] - 16s 164ms/step - loss: 1.0155 - accuracy: 0.5919 - val_loss: 1.1072 - val_accuracy: 0.5902
Epoch 37/100
97/97 [==============================] - 16s 165ms/step - loss: 1.0183 - accuracy: 0.5976 - val_loss: 1.2148 - val_accuracy: 0.5476
Epoch 38/100
97/97 [==============================] - 16s 166ms/step - loss: 0.9905 - accuracy: 0.6165 - val_loss: 1.3012 - val_accuracy: 0.5122
Epoch 39/100
97/97 [==============================] - 16s 165ms/step - loss: 0.9879 - accuracy: 0.6139 - val_loss: 1.0900 - val_accuracy: 0.5720
Epoch 40/100
97/97 [==============================] - 16s 165ms/step - loss: 0.9824 - accuracy: 0.6348 - val_loss: 1.2799 - val_accuracy: 0.5329
Epoch 41/100
97/97 [==============================] - 16s 164ms/step - loss: 0.9732 - accuracy: 0.6271 - val_loss: 1.2122 - val_accuracy: 0.5500
Epoch 42/100
97/97 [==============================] - 16s 164ms/step - loss: 1.0069 - accuracy: 0.6214 - val_loss: 1.1372 - val_accuracy: 0.5707
Epoch 43/100
97/97 [==============================] - 16s 165ms/step - loss: 0.9730 - accuracy: 0.6255 - val_loss: 1.1713 - val_accuracy: 0.5354
Epoch 44/100
97/97 [==============================] - 16s 166ms/step - loss: 0.9842 - accuracy: 0.6124 - val_loss: 1.1539 - val_accuracy: 0.5671
Epoch 45/100
97/97 [==============================] - 16s 164ms/step - loss: 0.9558 - accuracy: 0.6281 - val_loss: 1.0785 - val_accuracy: 0.6049
Epoch 46/100
97/97 [==============================] - 16s 165ms/step - loss: 0.9396 - accuracy: 0.6462 - val_loss: 1.0587 - val_accuracy: 0.6110
Epoch 47/100
97/97 [==============================] - 16s 164ms/step - loss: 0.9362 - accuracy: 0.6459 - val_loss: 1.1725 - val_accuracy: 0.5732
Epoch 48/100
97/97 [==============================] - 16s 163ms/step - loss: 0.9722 - accuracy: 0.6410 - val_loss: 1.1238 - val_accuracy: 0.5793
Epoch 49/100
97/97 [==============================] - 16s 165ms/step - loss: 0.9542 - accuracy: 0.6327 - val_loss: 1.1088 - val_accuracy: 0.5805
Epoch 50/100
97/97 [==============================] - 16s 164ms/step - loss: 0.9404 - accuracy: 0.6464 - val_loss: 1.1348 - val_accuracy: 0.5707
Epoch 51/100
97/97 [==============================] - 16s 164ms/step - loss: 0.9025 - accuracy: 0.6531 - val_loss: 1.0083 - val_accuracy: 0.6134
Epoch 52/100
97/97 [==============================] - 16s 164ms/step - loss: 0.9198 - accuracy: 0.6508 - val_loss: 1.1118 - val_accuracy: 0.5951
Epoch 53/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8861 - accuracy: 0.6603 - val_loss: 1.0299 - val_accuracy: 0.6073
Epoch 54/100
97/97 [==============================] - 16s 165ms/step - loss: 0.9208 - accuracy: 0.6513 - val_loss: 1.1205 - val_accuracy: 0.5817
Epoch 55/100
97/97 [==============================] - 16s 164ms/step - loss: 0.9442 - accuracy: 0.6477 - val_loss: 1.1353 - val_accuracy: 0.5768
Epoch 56/100
97/97 [==============================] - 16s 164ms/step - loss: 0.9465 - accuracy: 0.6529 - val_loss: 0.9860 - val_accuracy: 0.6329
Epoch 57/100
97/97 [==============================] - 16s 164ms/step - loss: 0.9027 - accuracy: 0.6663 - val_loss: 1.1099 - val_accuracy: 0.5890
Epoch 58/100
97/97 [==============================] - 16s 164ms/step - loss: 0.9540 - accuracy: 0.6327 - val_loss: 1.0612 - val_accuracy: 0.6134
Epoch 59/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8974 - accuracy: 0.6689 - val_loss: 0.9689 - val_accuracy: 0.6476
Epoch 60/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8724 - accuracy: 0.6720 - val_loss: 1.0741 - val_accuracy: 0.6134
Epoch 61/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8793 - accuracy: 0.6653 - val_loss: 1.1270 - val_accuracy: 0.5878
Epoch 62/100
97/97 [==============================] - 16s 165ms/step - loss: 0.9015 - accuracy: 0.6555 - val_loss: 1.0414 - val_accuracy: 0.6244
Epoch 63/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8629 - accuracy: 0.6705 - val_loss: 1.0875 - val_accuracy: 0.5988
Epoch 64/100
97/97 [==============================] - 16s 163ms/step - loss: 0.8946 - accuracy: 0.6653 - val_loss: 1.0904 - val_accuracy: 0.6183
Epoch 65/100
97/97 [==============================] - 16s 164ms/step - loss: 0.9036 - accuracy: 0.6598 - val_loss: 1.0074 - val_accuracy: 0.6317
Epoch 66/100
97/97 [==============================] - 16s 163ms/step - loss: 0.8749 - accuracy: 0.6622 - val_loss: 0.9530 - val_accuracy: 0.6366
Epoch 67/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8484 - accuracy: 0.6906 - val_loss: 1.1597 - val_accuracy: 0.5744
Epoch 68/100
97/97 [==============================] - 16s 164ms/step - loss: 0.9033 - accuracy: 0.6498 - val_loss: 0.9029 - val_accuracy: 0.6573
Epoch 69/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8401 - accuracy: 0.6870 - val_loss: 0.9403 - val_accuracy: 0.6646
Epoch 70/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8491 - accuracy: 0.6856 - val_loss: 1.0205 - val_accuracy: 0.6159
Epoch 71/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8590 - accuracy: 0.6840 - val_loss: 0.9257 - val_accuracy: 0.6549
Epoch 72/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8327 - accuracy: 0.6809 - val_loss: 1.0285 - val_accuracy: 0.6366
Epoch 73/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8266 - accuracy: 0.6952 - val_loss: 0.9369 - val_accuracy: 0.6463
Epoch 74/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8651 - accuracy: 0.6725 - val_loss: 0.9063 - val_accuracy: 0.6476
Epoch 75/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8059 - accuracy: 0.7014 - val_loss: 0.9477 - val_accuracy: 0.6610
Epoch 76/100
97/97 [==============================] - 16s 166ms/step - loss: 0.8294 - accuracy: 0.6897 - val_loss: 0.9500 - val_accuracy: 0.6720
Epoch 77/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8388 - accuracy: 0.6897 - val_loss: 0.9407 - val_accuracy: 0.6659
Epoch 78/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8184 - accuracy: 0.6912 - val_loss: 0.9157 - val_accuracy: 0.6695
Epoch 79/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8294 - accuracy: 0.6974 - val_loss: 1.0591 - val_accuracy: 0.6280
Epoch 80/100
97/97 [==============================] - 16s 163ms/step - loss: 0.8200 - accuracy: 0.7035 - val_loss: 0.8964 - val_accuracy: 0.6524
Epoch 81/100
97/97 [==============================] - 16s 163ms/step - loss: 0.7960 - accuracy: 0.7025 - val_loss: 1.0995 - val_accuracy: 0.6159
Epoch 82/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8670 - accuracy: 0.6808 - val_loss: 1.0700 - val_accuracy: 0.6195
Epoch 83/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8492 - accuracy: 0.6825 - val_loss: 0.9574 - val_accuracy: 0.6744
Epoch 84/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8157 - accuracy: 0.6890 - val_loss: 0.9339 - val_accuracy: 0.6317
Epoch 85/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8152 - accuracy: 0.7066 - val_loss: 0.9375 - val_accuracy: 0.6402
Epoch 86/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8477 - accuracy: 0.7004 - val_loss: 0.9676 - val_accuracy: 0.6427
Epoch 87/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8229 - accuracy: 0.6978 - val_loss: 0.9089 - val_accuracy: 0.6598
Epoch 88/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8164 - accuracy: 0.6963 - val_loss: 0.9757 - val_accuracy: 0.6390
Epoch 89/100
97/97 [==============================] - 16s 165ms/step - loss: 0.7854 - accuracy: 0.7123 - val_loss: 0.9617 - val_accuracy: 0.6451
Epoch 90/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8320 - accuracy: 0.6932 - val_loss: 0.9468 - val_accuracy: 0.6451
Epoch 91/100
97/97 [==============================] - 16s 164ms/step - loss: 0.7913 - accuracy: 0.7088 - val_loss: 0.9948 - val_accuracy: 0.6341
Epoch 92/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8054 - accuracy: 0.6994 - val_loss: 0.9125 - val_accuracy: 0.6659
Epoch 93/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8093 - accuracy: 0.6916 - val_loss: 1.0123 - val_accuracy: 0.6268
Epoch 94/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8003 - accuracy: 0.7046 - val_loss: 0.9888 - val_accuracy: 0.6451
Epoch 95/100
97/97 [==============================] - 16s 165ms/step - loss: 0.8106 - accuracy: 0.6840 - val_loss: 1.0545 - val_accuracy: 0.6280
Epoch 96/100
97/97 [==============================] - 16s 164ms/step - loss: 0.7772 - accuracy: 0.7149 - val_loss: 0.9354 - val_accuracy: 0.6488
Epoch 97/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8078 - accuracy: 0.7107 - val_loss: 0.9774 - val_accuracy: 0.6451
Epoch 98/100
97/97 [==============================] - 16s 164ms/step - loss: 0.8105 - accuracy: 0.7113 - val_loss: 0.9434 - val_accuracy: 0.6573
Epoch 99/100
97/97 [==============================] - 16s 164ms/step - loss: 0.7852 - accuracy: 0.6947 - val_loss: 0.9736 - val_accuracy: 0.6561
Epoch 100/100
97/97 [==============================] - ETA: 0s - loss: 0.7817 - accuracy: 0.7092Restoring model weights from the end of the best epoch.
97/97 [==============================] - 16s 164ms/step - loss: 0.7817 - accuracy: 0.7092 - val_loss: 0.9514 - val_accuracy: 0.6573
Epoch 00100: early stopping
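
EarlyStopping restores the best weights (the best validation loss of about 0.90 around epoch 80), so a direct hold-out evaluation is a reasonable follow-up. Note that compiler() returns a (model, history) tuple, so the trained model is model01[0] (a sketch, not part of the original run):

# Sketch: evaluate the restored best weights on the validation generator
val_loss, val_acc = model01[0].evaluate(validgen)
print(val_loss, val_acc)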

Get Prediction

In [22]:
def get_predictions(n):
    image1= validgen[0][0][n]
    #print(image1.shape)
    plt.imshow(image1)
    input_arr = tf.keras.preprocessing.image.img_to_array(validgen[0][0][n])
    input_arr = np.array([input_arr])  # Convert single image to a batch.
    predictions = model01[0].predict_classes(input_arr)
    #our dictionary starts from 1 whereas model has classes from 0.
    return insect_names[str(predictions[0]+1)]
In [23]:
get_predictions(13)
WARNING:tensorflow:From <ipython-input-22-16b6061831fc>:7: Sequential.predict_classes (from tensorflow.python.keras.engine.sequential) is deprecated and will be removed after 2021-01-01.
Instructions for updating:
Please use instead:
* `np.argmax(model.predict(x), axis=-1)`, if your model does multi-class classification (e.g. if it uses a `softmax` last-layer activation).
* `(model.predict(x) > 0.5).astype("int32")`, if your model does binary classification (e.g. if it uses a `sigmoid` last-layer activation).
Out[23]:
'Ladybird'
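
predict_classes is deprecated (as the warning above notes); an equivalent prediction with the recommended np.argmax approach would look like this (a sketch, assuming the trained model and generators above):

# Sketch: deprecation-free equivalent of get_predictions
def get_predictions_v2(n):
    image = validgen[0][0][n]
    plt.imshow(image)
    input_arr = np.array([image])          # single image -> batch of 1
    probs = model01[0].predict(input_arr)
    pred = np.argmax(probs, axis=-1)[0]
    # the label dictionary is 1-based while the model's classes are 0-based
    return insect_names[str(pred + 1)]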

Save Model

In [24]:
model01[0].save(filepath='Dataset/saved_models/Model01.tf',save_format="tf")
WARNING:tensorflow:From /opt/tljh/user/lib/python3.7/site-packages/tensorflow/python/training/tracking/tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
This property should not be used in TensorFlow 2.0, as updates are applied automatically.
WARNING:tensorflow:From /opt/tljh/user/lib/python3.7/site-packages/tensorflow/python/training/tracking/tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.
Instructions for updating:
This property should not be used in TensorFlow 2.0, as updates are applied automatically.
INFO:tensorflow:Assets written to: Dataset/saved_models/Model01.tf/assets
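
The saved model can later be reloaded with load_model (a sketch, not executed in the original notebook):

# Sketch: reload the SavedModel exported above
reloaded = tf.keras.models.load_model('Dataset/saved_models/Model01.tf')
reloaded.summary()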

DeepCC

In [53]:
!deepC 'Dataset/saved_models/Model01.tf'
/bin/bash: deepC: command not found