Cainvas
Model Files
model.h5
keras
Model
deepSea Compiled Models
model.exe
deepSea
Ubuntu

Flower Classification

Credit: AITS Cainvas Community

Photo by dongkyu lim on Dribbble

Unzip Data

In [1]:
!wget https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/Flower_Color.zip
--2021-08-27 04:08:15--  https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/Flower_Color.zip
Resolving cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)... 52.219.160.71
Connecting to cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)|52.219.160.71|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 52779776 (50M) [application/x-zip-compressed]
Saving to: ‘Flower_Color.zip’

Flower_Color.zip    100%[===================>]  50.33M   101MB/s    in 0.5s    

2021-08-27 04:08:15 (101 MB/s) - ‘Flower_Color.zip’ saved [52779776/52779776]

In [2]:
!unzip -qo Flower_Color.zip

Import Libraries

In [3]:
import numpy as np 
import pandas as pd

from PIL import ImageFile
from tqdm import tqdm
import h5py
import cv2

import matplotlib.pylab as plt
from matplotlib import cm
%matplotlib inline

from sklearn.model_selection import train_test_split

from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing import image as keras_image

from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D
from tensorflow.keras.layers import Activation, Flatten, Dropout, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras.layers import PReLU, LeakyReLU
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
import seaborn as sns

Data Preprocessing

Load image and convert into array

In [4]:
def image_to_tensor(img_path):
    img = keras_image.load_img("Flower Color/flower_images/" + img_path, target_size=(128, 128))
    x = keras_image.img_to_array(img)
    return np.expand_dims(x, axis=0)

Convert images into tensors

In [5]:
def data_to_tensor(img_paths):
    list_of_tensors = [image_to_tensor(img_path) for img_path in tqdm(img_paths)]
    return np.vstack(list_of_tensors)

Load the data

In [6]:
ImageFile.LOAD_TRUNCATED_IMAGES = True 
# Load the data
data = pd.read_csv("Flower Color/flower_images/flower_labels.csv")
files = data['file']
targets = data['label'].values
tensors = data_to_tensor(files);
100%|██████████| 210/210 [00:00<00:00, 686.39it/s]
In [7]:
data.head()
Out[7]:
file label
0 0001.png 0
1 0002.png 0
2 0003.png 2
3 0004.png 0
4 0005.png 0
In [8]:
tensors.shape
Out[8]:
(210, 128, 128, 3)
In [9]:
targets.shape
Out[9]:
(210,)
In [10]:
# Name list
names = ['phlox', 'rose', 'calendula', 'iris', 'max chrysanthemum', 
         'bellflower', 'viola', 'rudbeckia laciniata', 'peony', 'aquilegia']

Display sample images

In [11]:
def display_images(img_path, ax):
    img = cv2.imread("Flower Color/flower_images/" + img_path)
    ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
In [12]:
fig = plt.figure(figsize=(20, 10))
for i in range(4):
    ax = fig.add_subplot(2, 4, i + 1, xticks=[], yticks=[])
    ax.set_title(names[targets[i+10]], color='yellow')
    display_images(files[i+10], ax)

Save data in h5 file for future use

In [13]:
# Create h5 file and save data
with h5py.File('FlowerColorImages.h5', 'w') as f:
    f.create_dataset('images', data = tensors)
    f.create_dataset('labels', data = targets)
    f.close()
In [14]:
# Read the h5 file
f = h5py.File('FlowerColorImages.h5', 'r')

# List all groups
keys = list(f.keys())
keys
Out[14]:
['images', 'labels']

Create tensors and targets

In [15]:
tensors = np.array(f[keys[0]])
targets = np.array(f[keys[1]])
print ('Tensor shape:', tensors.shape)
print ('Target shape', targets.shape)
Tensor shape: (210, 128, 128, 3)
Target shape (210,)
In [16]:
# Create a csv file and save data
images_csv = tensors.reshape(210,128*128*3)
np.savetxt("flower_images.csv", images_csv, fmt='%i', delimiter=",")
In [17]:
# Read the pandas dataframe from csv
data_images = pd.read_csv("flower_images.csv", header=None)
data_images.head()
Out[17]:
0 1 2 3 4 5 6 7 8 9 ... 49142 49143 49144 49145 49146 49147 49148 49149 49150 49151
0 13 22 10 14 23 9 16 24 10 16 ... 44 59 38 38 26 25 16 117 74 78
1 38 49 30 37 50 30 38 52 30 40 ... 9 9 10 8 7 7 7 5 5 5
2 65 83 48 72 87 58 74 90 62 81 ... 35 52 70 38 56 74 41 58 76 42
3 162 53 102 147 66 91 156 80 97 169 ... 69 105 44 67 103 43 68 103 43 68
4 193 52 78 194 51 76 195 58 85 197 ... 199 253 179 200 248 163 189 229 120 154

5 rows × 49152 columns

In [18]:
data_images.iloc[:10,:10]
Out[18]:
0 1 2 3 4 5 6 7 8 9
0 13 22 10 14 23 9 16 24 10 16
1 38 49 30 37 50 30 38 52 30 40
2 65 83 48 72 87 58 74 90 62 81
3 162 53 102 147 66 91 156 80 97 169
4 193 52 78 194 51 76 195 58 85 197
5 53 76 55 53 76 55 53 77 56 53
6 8 9 8 8 9 9 9 9 9 8
7 9 9 8 9 9 9 8 8 8 8
8 195 127 169 188 118 160 135 76 101 55
9 7 7 7 8 7 7 8 9 8 9
In [19]:
data_images.shape
Out[19]:
(210, 49152)
In [20]:
# Read image tensors from the dataframe
tensors = data_images.values
tensors.shape
Out[20]:
(210, 49152)
In [21]:
tensors = tensors.reshape(-1,128,128,3)
tensors.shape
Out[21]:
(210, 128, 128, 3)

Normalize the tensors

In [22]:
tensors = tensors.astype('float32')/255

One-hot encode the targets

In [23]:
targets = to_categorical(targets, 10)

Split the data

In [24]:
x_train, x_test, y_train, y_test = train_test_split(tensors, targets, 
                                                    test_size = 0.2, 
                                                    random_state = 1)
n = int(len(x_test)/2)
x_valid, y_valid = x_test[:n], y_test[:n]
x_test, y_test = x_test[n:], y_test[n:]
In [25]:
x_train.shape, y_train.shape
Out[25]:
((168, 128, 128, 3), (168, 10))
In [26]:
x_test.shape, y_test.shape
Out[26]:
((21, 128, 128, 3), (21, 10))
In [27]:
 x_valid.shape, y_valid.shape
Out[27]:
((21, 128, 128, 3), (21, 10))
In [28]:
# Read and display a tensor
print('Label: ', names[np.argmax(y_train[7])])
plt.figure(figsize=(3,3))
plt.imshow((x_train[7]));
Label:  rose

Define Model and Train

Define the model

In [29]:
def model():
    model = Sequential()

    model.add(Conv2D(128, (3, 3), input_shape=x_train.shape[1:]))
    model.add(LeakyReLU(alpha=0.02))
    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, (3, 3)))
    model.add(LeakyReLU(alpha=0.02))
    
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(GlobalMaxPooling2D())
    
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.02))
    model.add(Dropout(0.5)) 

    model.add(Dense(10))
    model.add(Activation('softmax'))
    
    # TODO: Compile the model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    
    return model

model = model()

Create callbacks

In [30]:
# To save the best model
checkpointer = ModelCheckpoint(filepath='weights.best.model.hdf5', 
                               verbose=2, save_best_only=True)

# To reduce learning rate dynamically
lr_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                 patience=5, verbose=2, factor=0.2)
In [31]:
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 126, 126, 128)     3584      
_________________________________________________________________
leaky_re_lu (LeakyReLU)      (None, 126, 126, 128)     0         
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 63, 63, 128)       0         
_________________________________________________________________
dropout (Dropout)            (None, 63, 63, 128)       0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 61, 61, 128)       147584    
_________________________________________________________________
leaky_re_lu_1 (LeakyReLU)    (None, 61, 61, 128)       0         
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 30, 30, 128)       0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 30, 30, 128)       0         
_________________________________________________________________
global_max_pooling2d (Global (None, 128)               0         
_________________________________________________________________
dense (Dense)                (None, 512)               66048     
_________________________________________________________________
leaky_re_lu_2 (LeakyReLU)    (None, 512)               0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 512)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 10)                5130      
_________________________________________________________________
activation (Activation)      (None, 10)                0         
=================================================================
Total params: 222,346
Trainable params: 222,346
Non-trainable params: 0
_________________________________________________________________

Train the model

In [32]:
history = model.fit(x_train, y_train, 
                    epochs=75, batch_size=32, verbose=2,
                    validation_data=(x_valid, y_valid),
                    callbacks=[checkpointer,lr_reduction])
Epoch 1/75

Epoch 00001: val_loss improved from inf to 2.29284, saving model to weights.best.model.hdf5
6/6 - 1s - loss: 2.3273 - accuracy: 0.0833 - val_loss: 2.2928 - val_accuracy: 0.1905
Epoch 2/75

Epoch 00002: val_loss improved from 2.29284 to 2.26371, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 2.2387 - accuracy: 0.1845 - val_loss: 2.2637 - val_accuracy: 0.2381
Epoch 3/75

Epoch 00003: val_loss improved from 2.26371 to 2.20431, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 2.1673 - accuracy: 0.2440 - val_loss: 2.2043 - val_accuracy: 0.2857
Epoch 4/75

Epoch 00004: val_loss improved from 2.20431 to 2.11681, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 2.0648 - accuracy: 0.2917 - val_loss: 2.1168 - val_accuracy: 0.2857
Epoch 5/75

Epoch 00005: val_loss improved from 2.11681 to 1.97873, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 1.9040 - accuracy: 0.3631 - val_loss: 1.9787 - val_accuracy: 0.2857
Epoch 6/75

Epoch 00006: val_loss improved from 1.97873 to 1.91766, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 1.7358 - accuracy: 0.3750 - val_loss: 1.9177 - val_accuracy: 0.2857
Epoch 7/75

Epoch 00007: val_loss improved from 1.91766 to 1.66094, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 1.5620 - accuracy: 0.4940 - val_loss: 1.6609 - val_accuracy: 0.5238
Epoch 8/75

Epoch 00008: val_loss improved from 1.66094 to 1.54199, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 1.4386 - accuracy: 0.4702 - val_loss: 1.5420 - val_accuracy: 0.4762
Epoch 9/75

Epoch 00009: val_loss improved from 1.54199 to 1.50875, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 1.3967 - accuracy: 0.4405 - val_loss: 1.5088 - val_accuracy: 0.3810
Epoch 10/75

Epoch 00010: val_loss improved from 1.50875 to 1.39188, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 1.2534 - accuracy: 0.5833 - val_loss: 1.3919 - val_accuracy: 0.5238
Epoch 11/75

Epoch 00011: val_loss improved from 1.39188 to 1.32259, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 1.2138 - accuracy: 0.5833 - val_loss: 1.3226 - val_accuracy: 0.5238
Epoch 12/75

Epoch 00012: val_loss improved from 1.32259 to 1.27529, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 1.1561 - accuracy: 0.5655 - val_loss: 1.2753 - val_accuracy: 0.4762
Epoch 13/75

Epoch 00013: val_loss improved from 1.27529 to 1.24442, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 1.1170 - accuracy: 0.5893 - val_loss: 1.2444 - val_accuracy: 0.5714
Epoch 14/75

Epoch 00014: val_loss improved from 1.24442 to 1.16504, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 1.1292 - accuracy: 0.6369 - val_loss: 1.1650 - val_accuracy: 0.6667
Epoch 15/75

Epoch 00015: val_loss did not improve from 1.16504
6/6 - 0s - loss: 0.9825 - accuracy: 0.6964 - val_loss: 1.2202 - val_accuracy: 0.5714
Epoch 16/75

Epoch 00016: val_loss improved from 1.16504 to 1.07238, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.9460 - accuracy: 0.6667 - val_loss: 1.0724 - val_accuracy: 0.6667
Epoch 17/75

Epoch 00017: val_loss improved from 1.07238 to 1.01282, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.9414 - accuracy: 0.6548 - val_loss: 1.0128 - val_accuracy: 0.6190
Epoch 18/75

Epoch 00018: val_loss improved from 1.01282 to 0.97157, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.8470 - accuracy: 0.7024 - val_loss: 0.9716 - val_accuracy: 0.7143
Epoch 19/75

Epoch 00019: val_loss did not improve from 0.97157
6/6 - 0s - loss: 0.8980 - accuracy: 0.7024 - val_loss: 1.0308 - val_accuracy: 0.7143
Epoch 20/75

Epoch 00020: val_loss improved from 0.97157 to 0.95376, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.8400 - accuracy: 0.6607 - val_loss: 0.9538 - val_accuracy: 0.5714
Epoch 21/75

Epoch 00021: val_loss improved from 0.95376 to 0.89521, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.9158 - accuracy: 0.6845 - val_loss: 0.8952 - val_accuracy: 0.7619
Epoch 22/75

Epoch 00022: val_loss did not improve from 0.89521
6/6 - 0s - loss: 0.8557 - accuracy: 0.7024 - val_loss: 0.9407 - val_accuracy: 0.7143
Epoch 23/75

Epoch 00023: val_loss did not improve from 0.89521
6/6 - 0s - loss: 0.7360 - accuracy: 0.7321 - val_loss: 0.9693 - val_accuracy: 0.6190
Epoch 24/75

Epoch 00024: val_loss improved from 0.89521 to 0.87605, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.7832 - accuracy: 0.6667 - val_loss: 0.8760 - val_accuracy: 0.7143
Epoch 25/75

Epoch 00025: val_loss improved from 0.87605 to 0.84709, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.7381 - accuracy: 0.7083 - val_loss: 0.8471 - val_accuracy: 0.6667
Epoch 26/75

Epoch 00026: val_loss improved from 0.84709 to 0.78666, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.6582 - accuracy: 0.7917 - val_loss: 0.7867 - val_accuracy: 0.8095
Epoch 27/75

Epoch 00027: val_loss did not improve from 0.78666
6/6 - 0s - loss: 0.6352 - accuracy: 0.7857 - val_loss: 0.8342 - val_accuracy: 0.6190
Epoch 28/75

Epoch 00028: val_loss improved from 0.78666 to 0.74704, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.6608 - accuracy: 0.7679 - val_loss: 0.7470 - val_accuracy: 0.8571
Epoch 29/75

Epoch 00029: val_loss improved from 0.74704 to 0.72281, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.6745 - accuracy: 0.7500 - val_loss: 0.7228 - val_accuracy: 0.7143
Epoch 30/75

Epoch 00030: val_loss improved from 0.72281 to 0.67308, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.6037 - accuracy: 0.7917 - val_loss: 0.6731 - val_accuracy: 0.7619
Epoch 31/75

Epoch 00031: val_loss did not improve from 0.67308
6/6 - 0s - loss: 0.6433 - accuracy: 0.7500 - val_loss: 0.8629 - val_accuracy: 0.6190
Epoch 32/75

Epoch 00032: val_loss improved from 0.67308 to 0.64407, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.6362 - accuracy: 0.7738 - val_loss: 0.6441 - val_accuracy: 0.9048
Epoch 33/75

Epoch 00033: val_loss did not improve from 0.64407
6/6 - 0s - loss: 0.6201 - accuracy: 0.7738 - val_loss: 0.7369 - val_accuracy: 0.7143
Epoch 34/75

Epoch 00034: val_loss did not improve from 0.64407
6/6 - 0s - loss: 0.5571 - accuracy: 0.7917 - val_loss: 0.7132 - val_accuracy: 0.7619
Epoch 35/75

Epoch 00035: val_loss did not improve from 0.64407
6/6 - 0s - loss: 0.5550 - accuracy: 0.8214 - val_loss: 0.6495 - val_accuracy: 0.8095
Epoch 36/75

Epoch 00036: val_loss did not improve from 0.64407
6/6 - 0s - loss: 0.5365 - accuracy: 0.8095 - val_loss: 0.7280 - val_accuracy: 0.7143
Epoch 37/75

Epoch 00037: val_loss did not improve from 0.64407

Epoch 00037: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026.
6/6 - 0s - loss: 0.6111 - accuracy: 0.7917 - val_loss: 0.7562 - val_accuracy: 0.7619
Epoch 38/75

Epoch 00038: val_loss did not improve from 0.64407
6/6 - 0s - loss: 0.6186 - accuracy: 0.7857 - val_loss: 0.6752 - val_accuracy: 0.7619
Epoch 39/75

Epoch 00039: val_loss improved from 0.64407 to 0.63240, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.5010 - accuracy: 0.8274 - val_loss: 0.6324 - val_accuracy: 0.7619
Epoch 40/75

Epoch 00040: val_loss improved from 0.63240 to 0.62954, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.4911 - accuracy: 0.8214 - val_loss: 0.6295 - val_accuracy: 0.8095
Epoch 41/75

Epoch 00041: val_loss did not improve from 0.62954
6/6 - 0s - loss: 0.4395 - accuracy: 0.8452 - val_loss: 0.6514 - val_accuracy: 0.8095
Epoch 42/75

Epoch 00042: val_loss did not improve from 0.62954
6/6 - 0s - loss: 0.5337 - accuracy: 0.7976 - val_loss: 0.6583 - val_accuracy: 0.8571
Epoch 43/75

Epoch 00043: val_loss did not improve from 0.62954
6/6 - 0s - loss: 0.4387 - accuracy: 0.8571 - val_loss: 0.6372 - val_accuracy: 0.8571
Epoch 44/75

Epoch 00044: val_loss improved from 0.62954 to 0.61377, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.4310 - accuracy: 0.8452 - val_loss: 0.6138 - val_accuracy: 0.8571
Epoch 45/75

Epoch 00045: val_loss improved from 0.61377 to 0.58615, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.4101 - accuracy: 0.8452 - val_loss: 0.5862 - val_accuracy: 0.8571
Epoch 46/75

Epoch 00046: val_loss improved from 0.58615 to 0.57815, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.3981 - accuracy: 0.8571 - val_loss: 0.5781 - val_accuracy: 0.8095
Epoch 47/75

Epoch 00047: val_loss improved from 0.57815 to 0.55670, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.4260 - accuracy: 0.8393 - val_loss: 0.5567 - val_accuracy: 0.8571
Epoch 48/75

Epoch 00048: val_loss improved from 0.55670 to 0.55651, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.3945 - accuracy: 0.8512 - val_loss: 0.5565 - val_accuracy: 0.8571
Epoch 49/75

Epoch 00049: val_loss did not improve from 0.55651
6/6 - 0s - loss: 0.4284 - accuracy: 0.8512 - val_loss: 0.5659 - val_accuracy: 0.8571
Epoch 50/75

Epoch 00050: val_loss did not improve from 0.55651
6/6 - 0s - loss: 0.4396 - accuracy: 0.8214 - val_loss: 0.5896 - val_accuracy: 0.8095
Epoch 51/75

Epoch 00051: val_loss did not improve from 0.55651
6/6 - 0s - loss: 0.4104 - accuracy: 0.8393 - val_loss: 0.6106 - val_accuracy: 0.8571
Epoch 52/75

Epoch 00052: val_loss did not improve from 0.55651
6/6 - 0s - loss: 0.4052 - accuracy: 0.8512 - val_loss: 0.5764 - val_accuracy: 0.8571
Epoch 53/75

Epoch 00053: val_loss improved from 0.55651 to 0.55070, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.3924 - accuracy: 0.8571 - val_loss: 0.5507 - val_accuracy: 0.8571
Epoch 54/75

Epoch 00054: val_loss did not improve from 0.55070
6/6 - 0s - loss: 0.4008 - accuracy: 0.8512 - val_loss: 0.5547 - val_accuracy: 0.8571
Epoch 55/75

Epoch 00055: val_loss did not improve from 0.55070
6/6 - 0s - loss: 0.3240 - accuracy: 0.8869 - val_loss: 0.5767 - val_accuracy: 0.8571
Epoch 56/75

Epoch 00056: val_loss did not improve from 0.55070
6/6 - 0s - loss: 0.3817 - accuracy: 0.8810 - val_loss: 0.5507 - val_accuracy: 0.8571
Epoch 57/75

Epoch 00057: val_loss improved from 0.55070 to 0.53697, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.4187 - accuracy: 0.8512 - val_loss: 0.5370 - val_accuracy: 0.8571
Epoch 58/75

Epoch 00058: val_loss improved from 0.53697 to 0.53378, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.3852 - accuracy: 0.8571 - val_loss: 0.5338 - val_accuracy: 0.8571
Epoch 59/75

Epoch 00059: val_loss did not improve from 0.53378
6/6 - 0s - loss: 0.3842 - accuracy: 0.8512 - val_loss: 0.5752 - val_accuracy: 0.8571
Epoch 60/75

Epoch 00060: val_loss did not improve from 0.53378
6/6 - 0s - loss: 0.3812 - accuracy: 0.8869 - val_loss: 0.5513 - val_accuracy: 0.8095
Epoch 61/75

Epoch 00061: val_loss improved from 0.53378 to 0.52113, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.3865 - accuracy: 0.8571 - val_loss: 0.5211 - val_accuracy: 0.8095
Epoch 62/75

Epoch 00062: val_loss did not improve from 0.52113
6/6 - 0s - loss: 0.3975 - accuracy: 0.8393 - val_loss: 0.5402 - val_accuracy: 0.8571
Epoch 63/75

Epoch 00063: val_loss did not improve from 0.52113
6/6 - 0s - loss: 0.3670 - accuracy: 0.8452 - val_loss: 0.5475 - val_accuracy: 0.8571
Epoch 64/75

Epoch 00064: val_loss did not improve from 0.52113
6/6 - 0s - loss: 0.3578 - accuracy: 0.8750 - val_loss: 0.5266 - val_accuracy: 0.8571
Epoch 65/75

Epoch 00065: val_loss improved from 0.52113 to 0.51370, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.3419 - accuracy: 0.8929 - val_loss: 0.5137 - val_accuracy: 0.8571
Epoch 66/75

Epoch 00066: val_loss did not improve from 0.51370
6/6 - 0s - loss: 0.3467 - accuracy: 0.8929 - val_loss: 0.5366 - val_accuracy: 0.8571
Epoch 67/75

Epoch 00067: val_loss did not improve from 0.51370
6/6 - 0s - loss: 0.3277 - accuracy: 0.8810 - val_loss: 0.5330 - val_accuracy: 0.8571
Epoch 68/75

Epoch 00068: val_loss improved from 0.51370 to 0.50458, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.3381 - accuracy: 0.8988 - val_loss: 0.5046 - val_accuracy: 0.8571
Epoch 69/75

Epoch 00069: val_loss improved from 0.50458 to 0.50315, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.3387 - accuracy: 0.8929 - val_loss: 0.5032 - val_accuracy: 0.8571
Epoch 70/75

Epoch 00070: val_loss did not improve from 0.50315
6/6 - 0s - loss: 0.3322 - accuracy: 0.9107 - val_loss: 0.5033 - val_accuracy: 0.8571
Epoch 71/75

Epoch 00071: val_loss did not improve from 0.50315
6/6 - 0s - loss: 0.3556 - accuracy: 0.8810 - val_loss: 0.5380 - val_accuracy: 0.8571
Epoch 72/75

Epoch 00072: val_loss did not improve from 0.50315
6/6 - 0s - loss: 0.3603 - accuracy: 0.8512 - val_loss: 0.5556 - val_accuracy: 0.8095
Epoch 73/75

Epoch 00073: val_loss did not improve from 0.50315
6/6 - 0s - loss: 0.3212 - accuracy: 0.8750 - val_loss: 0.5084 - val_accuracy: 0.9048
Epoch 74/75

Epoch 00074: val_loss improved from 0.50315 to 0.47558, saving model to weights.best.model.hdf5
6/6 - 0s - loss: 0.3348 - accuracy: 0.9107 - val_loss: 0.4756 - val_accuracy: 0.9524
Epoch 75/75

Epoch 00075: val_loss did not improve from 0.47558
6/6 - 0s - loss: 0.3283 - accuracy: 0.8869 - val_loss: 0.4908 - val_accuracy: 0.9524

Train the model with image generation

In [33]:
data_generator = keras_image.ImageDataGenerator(shear_range=0.3, 
                                                zoom_range=0.3,
                                                rotation_range=30,
                                                horizontal_flip=True)

dg_history = model.fit_generator(data_generator.flow(x_train, y_train, batch_size=64),
                                 steps_per_epoch = len(x_train)//64, epochs=7, verbose=2, 
                                 validation_data=(x_valid, y_valid),
                                 callbacks=[checkpointer,lr_reduction])
WARNING:tensorflow:From <ipython-input-33-0a6a602e8ef1>:9: Model.fit_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
Please use Model.fit, which supports generators.
Epoch 1/7

Epoch 00001: val_loss did not improve from 0.47558
2/2 - 0s - loss: 0.3978 - accuracy: 0.8594 - val_loss: 0.5165 - val_accuracy: 0.8571
Epoch 2/7

Epoch 00002: val_loss did not improve from 0.47558
2/2 - 0s - loss: 0.4320 - accuracy: 0.8438 - val_loss: 0.5556 - val_accuracy: 0.8095
Epoch 3/7

Epoch 00003: val_loss did not improve from 0.47558
2/2 - 0s - loss: 0.5079 - accuracy: 0.8462 - val_loss: 0.5963 - val_accuracy: 0.7619
Epoch 4/7

Epoch 00004: val_loss did not improve from 0.47558
2/2 - 0s - loss: 0.4423 - accuracy: 0.8558 - val_loss: 0.6188 - val_accuracy: 0.7619
Epoch 5/7

Epoch 00005: val_loss did not improve from 0.47558
2/2 - 0s - loss: 0.3856 - accuracy: 0.8750 - val_loss: 0.6051 - val_accuracy: 0.7619
Epoch 6/7

Epoch 00006: val_loss did not improve from 0.47558

Epoch 00006: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05.
2/2 - 0s - loss: 0.4019 - accuracy: 0.8462 - val_loss: 0.5754 - val_accuracy: 0.7619
Epoch 7/7

Epoch 00007: val_loss did not improve from 0.47558
2/2 - 0s - loss: 0.3625 - accuracy: 0.8654 - val_loss: 0.5692 - val_accuracy: 0.8095
In [34]:
# Load the model with the best validation accuracy
model.load_weights('weights.best.model.hdf5')

Model Evaluation

In [36]:
# Calculate classification accuracy on the testing set
score = model.evaluate(x_test, y_test)
score
1/1 [==============================] - 0s 989us/step - loss: 0.7078 - accuracy: 0.7619
Out[36]:
[0.7077974677085876, 0.761904776096344]
In [38]:
# Calculate classification accuracy on the training set
score = model.evaluate(x_train, y_train)
score
6/6 [==============================] - 0s 19ms/step - loss: 0.3418 - accuracy: 0.9345
Out[38]:
[0.34182748198509216, 0.9345238208770752]
In [40]:
# Calculate classification accuracy on the validation set
score = model.evaluate(x_valid, y_valid)
score
1/1 [==============================] - 0s 1ms/step - loss: 0.4756 - accuracy: 0.9524
Out[40]:
[0.4755815267562866, 0.9523809552192688]

Accuracy Plot

In [41]:
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

Loss Plot

In [42]:
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
In [43]:
model.save('model.h5')
In [44]:
model1 = load_model('model.h5')

Predictions

In [45]:
# Model predictions for the testing dataset
y_test_predict = model1.predict(x_test)
In [46]:
y_test_predict = np.argmax(y_test_predict,axis=1)

Display Predictions

In [47]:
# Display true labels and predictions
fig = plt.figure(figsize=(18, 18))
for i, idx in enumerate(np.random.choice(x_test.shape[0], size=16, replace=False)):
    ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(x_test[idx]))
    pred_idx = y_test_predict[idx]
    true_idx = np.argmax(y_test[idx])
    ax.set_title("{} ({})".format(names[pred_idx], names[true_idx]),
                 color=("#4876ff" if pred_idx == true_idx else "darkred"))

Plot Confusion Matrix

In [48]:
y_test_orig = []
In [49]:
for i in y_test:
  y_test_orig.append(np.argmax(i)) 
In [50]:
y_test_orig = np.array(y_test_orig)
In [51]:
y_test_orig
Out[51]:
array([6, 3, 4, 9, 2, 0, 7, 6, 1, 2, 4, 0, 6, 0, 3, 1, 6, 5, 5, 7, 1])
In [52]:
y_test_predict
Out[52]:
array([6, 5, 4, 9, 2, 0, 7, 9, 8, 2, 4, 8, 6, 0, 3, 7, 6, 5, 5, 7, 1])
In [53]:
#Plotting a confusion matrix
cnf = confusion_matrix(y_test_predict,y_test_orig)


df_cnf = pd.DataFrame(cnf, range(10), range(10))
sns.heatmap(df_cnf, annot = True)
plt.title("Confusion Matrix")
plt.show()

DeepCC

In [54]:
!deepCC model.h5
[INFO]
Reading [keras model] 'model.h5'
[SUCCESS]
Saved 'model_deepC/model.onnx'
[INFO]
Reading [onnx model] 'model_deepC/model.onnx'
[INFO]
Model info:
  ir_vesion : 5
  doc       : 
[WARNING]
[ONNX]: terminal (input/output) conv2d_input's shape is less than 1. Changing it to 1.
[WARNING]
[ONNX]: terminal (input/output) activation's shape is less than 1. Changing it to 1.
[INFO]
Running DNNC graph sanity check ...
[SUCCESS]
Passed sanity check.
[INFO]
Writing C++ file 'model_deepC/model.cpp'
[INFO]
deepSea model files are ready in 'model_deepC/' 
[RUNNING COMMAND]
g++ -std=c++11 -O3 -fno-rtti -fno-exceptions -I. -I/opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/include -isystem /opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/packages/eigen-eigen-323c052e1731 "model_deepC/model.cpp" -D_AITS_MAIN -o "model_deepC/model.exe"
[RUNNING COMMAND]
size "model_deepC/model.exe"
   text	   data	    bss	    dec	    hex	filename
1066025	   4168	    760	1070953	 105769	model_deepC/model.exe
[SUCCESS]
Saved model as executable "model_deepC/model.exe"