Cainvas
Model Files
best.h5
keras
Model
deepSea Compiled Models
best.exe
deepSea
Ubuntu

Shape Classification

Credit: AITS Cainvas Community

Photo by Margarita Ivanchikova on Dribbble

In [1]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import warnings
import os
import tensorflow as tf
warnings.filterwarnings('ignore')
import shutil
from random import shuffle

Unzipping Data

In [2]:
!wget https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/archive_1.zip

!unzip -qo archive_1.zip

# zip folder is not needed anymore
!rm archive_1.zip
--2021-09-07 09:22:34--  https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/archive_1.zip
Resolving cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)... 52.219.62.4
Connecting to cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)|52.219.62.4|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 411916 (402K) [application/x-zip-compressed]
Saving to: ‘archive_1.zip’

archive_1.zip       100%[===================>] 402.26K  --.-KB/s    in 0.005s  

2021-09-07 09:22:34 (79.8 MB/s) - ‘archive_1.zip’ saved [411916/411916]

In [3]:
shapes = ['triangles', 'circles', 'squares']

path= 'shapes/'

files= []
result= []

for shape in shapes:
    new_path= path+shape
    
    for file in os.listdir(new_path):
        files.append(os.path.join(new_path,file))
        result.append(shape)
In [4]:
len(files)
Out[4]:
300
In [5]:
len(result)
Out[5]:
300
In [6]:
files[:5]
Out[6]:
['shapes/triangles/drawing(3).png',
 'shapes/triangles/drawing(16).png',
 'shapes/triangles/drawing(71).png',
 'shapes/triangles/drawing(68).png',
 'shapes/triangles/drawing(75).png']
In [7]:
from skimage import color
images=[]
for file in files:
    img= plt.imread(file)
    img= color.rgb2gray(img)
    img= img.ravel()
    images.append(img)
In [8]:
len(images)
Out[8]:
300

Data Visualsation

In [9]:
df= pd.DataFrame(images)
In [10]:
df.head()
Out[10]:
0 1 2 3 4 5 6 7 8 9 ... 774 775 776 777 778 779 780 781 782 783
0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0
1 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0
2 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0
3 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0
4 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0

5 rows × 784 columns

In [11]:
df['result']= result
df.head()
Out[11]:
0 1 2 3 4 5 6 7 8 9 ... 775 776 777 778 779 780 781 782 783 result
0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 triangles
1 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 triangles
2 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 triangles
3 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 triangles
4 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 triangles

5 rows × 785 columns

In [12]:
from sklearn.utils import shuffle

df= shuffle(df)
df.head()
Out[12]:
0 1 2 3 4 5 6 7 8 9 ... 775 776 777 778 779 780 781 782 783 result
27 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 triangles
127 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 circles
113 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 circles
73 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 triangles
293 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 squares

5 rows × 785 columns

In [13]:
df['result']= df['result'].replace({'circles':0, 'triangles':1, 'squares':2})
df.head()
Out[13]:
0 1 2 3 4 5 6 7 8 9 ... 775 776 777 778 779 780 781 782 783 result
27 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1
127 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 0
113 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 0
73 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1
293 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 2

5 rows × 785 columns

Dropping useless column

In [14]:
y= df['result']
X= df.drop('result', axis=1)
In [15]:
X= X.values.reshape(-1,28,28,1)

Uniform Type Conversion

In [16]:
from tensorflow.keras.utils import to_categorical

y= to_categorical(y)
y.astype('int32')
Out[16]:
array([[0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 1, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 0, 1],
       [0, 0, 1],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [1, 0, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 1, 0],
       [1, 0, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 1, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 1, 0],
       [0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [0, 1, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 0, 1],
       [0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 1, 0],
       [1, 0, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 1, 0],
       [1, 0, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 0, 1],
       [1, 0, 0],
       [0, 1, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 0, 1],
       [0, 0, 1],
       [1, 0, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 1, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 1, 0],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 0, 1],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 1, 0],
       [0, 0, 1],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 0],
       [0, 1, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 0, 1],
       [1, 0, 0],
       [0, 0, 1],
       [0, 0, 1],
       [0, 1, 0],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [1, 0, 0],
       [0, 0, 1],
       [1, 0, 0],
       [1, 0, 0],
       [0, 1, 0],
       [0, 1, 0],
       [0, 1, 0]], dtype=int32)

Train Test Split

In [17]:
from sklearn.model_selection import train_test_split

X_train, X_test, y_train,y_test= train_test_split(X,y, test_size=0.2, random_state=42)

Model Architecture

In [18]:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, BatchNormalization, MaxPool2D
In [19]:
model= Sequential()
model.add(Conv2D(32, (3,3), activation='relu', padding='same', input_shape=(28,28,1)))
model.add(Conv2D(32, (3,3), activation='relu'))

model.add(MaxPool2D(2,2))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPool2D(2,2))

model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(3, activation='softmax'))
In [20]:
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 28, 28, 32)        320       
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 26, 26, 32)        9248      
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 13, 13, 32)        0         
_________________________________________________________________
dropout (Dropout)            (None, 13, 13, 32)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 11, 11, 64)        18496     
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 5, 5, 64)          0         
_________________________________________________________________
flatten (Flatten)            (None, 1600)              0         
_________________________________________________________________
dense (Dense)                (None, 64)                102464    
_________________________________________________________________
dropout_1 (Dropout)          (None, 64)                0         
_________________________________________________________________
dense_1 (Dense)              (None, 3)                 195       
=================================================================
Total params: 130,723
Trainable params: 130,723
Non-trainable params: 0
_________________________________________________________________
In [21]:
from tensorflow.keras.callbacks import ModelCheckpoint

model_save= ModelCheckpoint('best.h5', save_best_only=True, monitor='val_accuracy')
In [22]:
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

Training the model

In [23]:
history= model.fit(X_train, y_train, batch_size=20, epochs=100, verbose=1, validation_data=(X_test, y_test), callbacks=[model_save])
Epoch 1/100
12/12 [==============================] - 0s 17ms/step - loss: 1.1239 - accuracy: 0.3167 - val_loss: 1.0997 - val_accuracy: 0.2833
Epoch 2/100
12/12 [==============================] - 0s 4ms/step - loss: 1.0977 - accuracy: 0.3250 - val_loss: 1.0985 - val_accuracy: 0.2667
Epoch 3/100
12/12 [==============================] - 0s 5ms/step - loss: 1.0800 - accuracy: 0.4167 - val_loss: 1.0837 - val_accuracy: 0.3833
Epoch 4/100
12/12 [==============================] - 0s 5ms/step - loss: 1.0567 - accuracy: 0.4667 - val_loss: 1.0364 - val_accuracy: 0.4833
Epoch 5/100
12/12 [==============================] - 0s 5ms/step - loss: 0.9785 - accuracy: 0.5250 - val_loss: 0.9649 - val_accuracy: 0.5000
Epoch 6/100
12/12 [==============================] - 0s 4ms/step - loss: 0.9098 - accuracy: 0.5875 - val_loss: 0.9464 - val_accuracy: 0.5000
Epoch 7/100
12/12 [==============================] - 0s 5ms/step - loss: 0.8686 - accuracy: 0.5833 - val_loss: 0.8197 - val_accuracy: 0.7000
Epoch 8/100
12/12 [==============================] - 0s 5ms/step - loss: 0.7911 - accuracy: 0.6667 - val_loss: 0.6943 - val_accuracy: 0.7333
Epoch 9/100
12/12 [==============================] - 0s 5ms/step - loss: 0.6650 - accuracy: 0.7333 - val_loss: 0.6329 - val_accuracy: 0.7500
Epoch 10/100
12/12 [==============================] - 0s 5ms/step - loss: 0.5535 - accuracy: 0.8250 - val_loss: 0.4904 - val_accuracy: 0.8667
Epoch 11/100
12/12 [==============================] - 0s 3ms/step - loss: 0.4668 - accuracy: 0.8083 - val_loss: 0.4037 - val_accuracy: 0.8500
Epoch 12/100
12/12 [==============================] - 0s 3ms/step - loss: 0.3890 - accuracy: 0.8833 - val_loss: 0.6301 - val_accuracy: 0.7500
Epoch 13/100
12/12 [==============================] - 0s 3ms/step - loss: 0.3630 - accuracy: 0.8542 - val_loss: 0.3618 - val_accuracy: 0.8000
Epoch 14/100
12/12 [==============================] - 0s 5ms/step - loss: 0.3404 - accuracy: 0.8833 - val_loss: 0.3157 - val_accuracy: 0.9167
Epoch 15/100
12/12 [==============================] - 0s 3ms/step - loss: 0.2296 - accuracy: 0.9292 - val_loss: 0.2832 - val_accuracy: 0.9167
Epoch 16/100
12/12 [==============================] - 0s 3ms/step - loss: 0.2002 - accuracy: 0.9208 - val_loss: 0.2460 - val_accuracy: 0.8833
Epoch 17/100
12/12 [==============================] - 0s 3ms/step - loss: 0.1875 - accuracy: 0.9500 - val_loss: 0.2222 - val_accuracy: 0.9167
Epoch 18/100
12/12 [==============================] - 0s 3ms/step - loss: 0.1665 - accuracy: 0.9625 - val_loss: 0.2455 - val_accuracy: 0.9167
Epoch 19/100
12/12 [==============================] - 0s 3ms/step - loss: 0.1311 - accuracy: 0.9583 - val_loss: 0.2300 - val_accuracy: 0.9167
Epoch 20/100
12/12 [==============================] - 0s 3ms/step - loss: 0.1100 - accuracy: 0.9625 - val_loss: 0.2572 - val_accuracy: 0.9000
Epoch 21/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0887 - accuracy: 0.9750 - val_loss: 0.2276 - val_accuracy: 0.9000
Epoch 22/100
12/12 [==============================] - 0s 5ms/step - loss: 0.0997 - accuracy: 0.9625 - val_loss: 0.2332 - val_accuracy: 0.9333
Epoch 23/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0711 - accuracy: 0.9750 - val_loss: 0.1945 - val_accuracy: 0.9333
Epoch 24/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0703 - accuracy: 0.9833 - val_loss: 0.2430 - val_accuracy: 0.9000
Epoch 25/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0768 - accuracy: 0.9833 - val_loss: 0.2099 - val_accuracy: 0.9333
Epoch 26/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0684 - accuracy: 0.9792 - val_loss: 0.1788 - val_accuracy: 0.9333
Epoch 27/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0369 - accuracy: 0.9917 - val_loss: 0.2280 - val_accuracy: 0.9333
Epoch 28/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0363 - accuracy: 0.9917 - val_loss: 0.3382 - val_accuracy: 0.9333
Epoch 29/100
12/12 [==============================] - 0s 5ms/step - loss: 0.0457 - accuracy: 0.9958 - val_loss: 0.2675 - val_accuracy: 0.9500
Epoch 30/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0381 - accuracy: 0.9917 - val_loss: 0.2777 - val_accuracy: 0.9333
Epoch 31/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0253 - accuracy: 0.9958 - val_loss: 0.2432 - val_accuracy: 0.9167
Epoch 32/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0331 - accuracy: 0.9958 - val_loss: 0.2388 - val_accuracy: 0.9167
Epoch 33/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0465 - accuracy: 0.9917 - val_loss: 0.2236 - val_accuracy: 0.9333
Epoch 34/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0258 - accuracy: 0.9958 - val_loss: 0.3889 - val_accuracy: 0.9333
Epoch 35/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0190 - accuracy: 1.0000 - val_loss: 0.1848 - val_accuracy: 0.9167
Epoch 36/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0202 - accuracy: 0.9958 - val_loss: 0.3516 - val_accuracy: 0.9167
Epoch 37/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0150 - accuracy: 0.9958 - val_loss: 0.2604 - val_accuracy: 0.9167
Epoch 38/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0162 - accuracy: 0.9958 - val_loss: 0.4261 - val_accuracy: 0.9000
Epoch 39/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0158 - accuracy: 1.0000 - val_loss: 0.3704 - val_accuracy: 0.9167
Epoch 40/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0232 - accuracy: 0.9958 - val_loss: 0.3247 - val_accuracy: 0.9167
Epoch 41/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0094 - accuracy: 1.0000 - val_loss: 0.2002 - val_accuracy: 0.9333
Epoch 42/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0162 - accuracy: 1.0000 - val_loss: 0.3476 - val_accuracy: 0.9333
Epoch 43/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0127 - accuracy: 1.0000 - val_loss: 0.2738 - val_accuracy: 0.9167
Epoch 44/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0093 - accuracy: 1.0000 - val_loss: 0.3293 - val_accuracy: 0.9500
Epoch 45/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0119 - accuracy: 1.0000 - val_loss: 0.2833 - val_accuracy: 0.9333
Epoch 46/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0025 - accuracy: 1.0000 - val_loss: 0.3621 - val_accuracy: 0.9333
Epoch 47/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0085 - accuracy: 1.0000 - val_loss: 0.3178 - val_accuracy: 0.9167
Epoch 48/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0057 - accuracy: 1.0000 - val_loss: 0.3599 - val_accuracy: 0.9500
Epoch 49/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0043 - accuracy: 1.0000 - val_loss: 0.3611 - val_accuracy: 0.9167
Epoch 50/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0109 - accuracy: 0.9958 - val_loss: 0.4254 - val_accuracy: 0.9333
Epoch 51/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0131 - accuracy: 0.9958 - val_loss: 0.3009 - val_accuracy: 0.9167
Epoch 52/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0066 - accuracy: 1.0000 - val_loss: 0.3425 - val_accuracy: 0.9500
Epoch 53/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0053 - accuracy: 1.0000 - val_loss: 0.3860 - val_accuracy: 0.9333
Epoch 54/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0448 - accuracy: 0.9917 - val_loss: 0.3114 - val_accuracy: 0.9167
Epoch 55/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0423 - accuracy: 0.9833 - val_loss: 0.4639 - val_accuracy: 0.9000
Epoch 56/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0265 - accuracy: 0.9917 - val_loss: 0.3918 - val_accuracy: 0.9167
Epoch 57/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0186 - accuracy: 0.9917 - val_loss: 0.3646 - val_accuracy: 0.8667
Epoch 58/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0143 - accuracy: 1.0000 - val_loss: 0.3633 - val_accuracy: 0.9333
Epoch 59/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0140 - accuracy: 1.0000 - val_loss: 0.3093 - val_accuracy: 0.9333
Epoch 60/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0074 - accuracy: 1.0000 - val_loss: 0.3190 - val_accuracy: 0.9167
Epoch 61/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0040 - accuracy: 1.0000 - val_loss: 0.3723 - val_accuracy: 0.9167
Epoch 62/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0069 - accuracy: 1.0000 - val_loss: 0.3637 - val_accuracy: 0.9167
Epoch 63/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0024 - accuracy: 1.0000 - val_loss: 0.3314 - val_accuracy: 0.9167
Epoch 64/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0103 - accuracy: 0.9958 - val_loss: 0.3246 - val_accuracy: 0.9333
Epoch 65/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0056 - accuracy: 1.0000 - val_loss: 0.2706 - val_accuracy: 0.9167
Epoch 66/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0036 - accuracy: 1.0000 - val_loss: 0.3149 - val_accuracy: 0.9000
Epoch 67/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0060 - accuracy: 1.0000 - val_loss: 0.3119 - val_accuracy: 0.9500
Epoch 68/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0043 - accuracy: 1.0000 - val_loss: 0.3072 - val_accuracy: 0.9333
Epoch 69/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 0.3373 - val_accuracy: 0.9167
Epoch 70/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0032 - accuracy: 1.0000 - val_loss: 0.3861 - val_accuracy: 0.9333
Epoch 71/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0042 - accuracy: 1.0000 - val_loss: 0.2918 - val_accuracy: 0.9167
Epoch 72/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0107 - accuracy: 1.0000 - val_loss: 0.5054 - val_accuracy: 0.9333
Epoch 73/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0201 - accuracy: 0.9958 - val_loss: 0.2471 - val_accuracy: 0.9500
Epoch 74/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0099 - accuracy: 1.0000 - val_loss: 0.2204 - val_accuracy: 0.9333
Epoch 75/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0083 - accuracy: 0.9958 - val_loss: 0.2549 - val_accuracy: 0.9333
Epoch 76/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0035 - accuracy: 1.0000 - val_loss: 0.2643 - val_accuracy: 0.9333
Epoch 77/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0038 - accuracy: 1.0000 - val_loss: 0.3136 - val_accuracy: 0.9333
Epoch 78/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0072 - accuracy: 0.9958 - val_loss: 0.4150 - val_accuracy: 0.9333
Epoch 79/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0018 - accuracy: 1.0000 - val_loss: 0.3384 - val_accuracy: 0.9333
Epoch 80/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0011 - accuracy: 1.0000 - val_loss: 0.3052 - val_accuracy: 0.9333
Epoch 81/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 0.3394 - val_accuracy: 0.9333
Epoch 82/100
12/12 [==============================] - 0s 3ms/step - loss: 9.2191e-04 - accuracy: 1.0000 - val_loss: 0.3482 - val_accuracy: 0.9333
Epoch 83/100
12/12 [==============================] - 0s 3ms/step - loss: 7.2879e-04 - accuracy: 1.0000 - val_loss: 0.3531 - val_accuracy: 0.9333
Epoch 84/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 0.3403 - val_accuracy: 0.9333
Epoch 85/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0012 - accuracy: 1.0000 - val_loss: 0.3709 - val_accuracy: 0.9333
Epoch 86/100
12/12 [==============================] - 0s 3ms/step - loss: 7.5196e-04 - accuracy: 1.0000 - val_loss: 0.3767 - val_accuracy: 0.9333
Epoch 87/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 0.3154 - val_accuracy: 0.9333
Epoch 88/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0014 - accuracy: 1.0000 - val_loss: 0.3961 - val_accuracy: 0.9333
Epoch 89/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0015 - accuracy: 1.0000 - val_loss: 0.4412 - val_accuracy: 0.9333
Epoch 90/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0017 - accuracy: 1.0000 - val_loss: 0.4327 - val_accuracy: 0.9333
Epoch 91/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0011 - accuracy: 1.0000 - val_loss: 0.4278 - val_accuracy: 0.9167
Epoch 92/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0153 - accuracy: 0.9958 - val_loss: 0.6082 - val_accuracy: 0.8833
Epoch 93/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0197 - accuracy: 0.9875 - val_loss: 0.4157 - val_accuracy: 0.9333
Epoch 94/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0374 - accuracy: 0.9917 - val_loss: 0.3612 - val_accuracy: 0.9000
Epoch 95/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0185 - accuracy: 0.9958 - val_loss: 0.1459 - val_accuracy: 0.9333
Epoch 96/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0052 - accuracy: 1.0000 - val_loss: 0.1715 - val_accuracy: 0.9500
Epoch 97/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0067 - accuracy: 0.9958 - val_loss: 0.2334 - val_accuracy: 0.9333
Epoch 98/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0028 - accuracy: 1.0000 - val_loss: 0.2252 - val_accuracy: 0.9500
Epoch 99/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0015 - accuracy: 1.0000 - val_loss: 0.1926 - val_accuracy: 0.9500
Epoch 100/100
12/12 [==============================] - 0s 3ms/step - loss: 0.0059 - accuracy: 1.0000 - val_loss: 0.3065 - val_accuracy: 0.9167

Result and Prediction

In [26]:
model.load_weights('best.h5')

model.evaluate(X_test,y_test, batch_size=20)
3/3 [==============================] - 0s 1ms/step - loss: 0.2675 - accuracy: 0.9500
Out[26]:
[0.2674843370914459, 0.949999988079071]
In [27]:
pred= model.predict(X_test)
predictions= np.argmax(pred, axis=1)
predictions
Out[27]:
array([2, 1, 1, 1, 1, 2, 1, 2, 0, 1, 1, 2, 2, 2, 1, 2, 1, 1, 0, 2, 1, 2,
       2, 0, 0, 2, 0, 0, 2, 0, 0, 2, 2, 2, 2, 1, 1, 1, 0, 1, 1, 0, 0, 0,
       2, 2, 0, 0, 2, 2, 2, 1, 0, 0, 1, 1, 2, 2, 2, 1])

Accuracy and Loss Graph

In [28]:
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
Out[28]:
[<matplotlib.lines.Line2D at 0x7fb22c0d3320>]
In [29]:
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
Out[29]:
[<matplotlib.lines.Line2D at 0x7fb20c0616d8>]

Testing

In [30]:
plt.imshow(X_test[2])
Out[30]:
<matplotlib.image.AxesImage at 0x7fb1c07e6f28>
In [31]:
y_train[2]
Out[31]:
array([0., 1., 0.], dtype=float32)
In [32]:
predictions[2]
Out[32]:
1

DeepCC

In [33]:
!deepCC best.h5
[INFO]
Reading [keras model] 'best.h5'
[SUCCESS]
Saved 'best_deepC/best.onnx'
[INFO]
Reading [onnx model] 'best_deepC/best.onnx'
[INFO]
Model info:
  ir_vesion : 5
  doc       : 
[WARNING]
[ONNX]: graph-node conv2d's attribute auto_pad has no meaningful data.
[WARNING]
[ONNX]: terminal (input/output) conv2d_input's shape is less than 1. Changing it to 1.
[WARNING]
[ONNX]: terminal (input/output) dense_1's shape is less than 1. Changing it to 1.
WARN (GRAPH): found operator node with the same name (dense_1) as io node.
[INFO]
Running DNNC graph sanity check ...
[SUCCESS]
Passed sanity check.
[INFO]
Writing C++ file 'best_deepC/best.cpp'
[INFO]
deepSea model files are ready in 'best_deepC/' 
[RUNNING COMMAND]
g++ -std=c++11 -O3 -fno-rtti -fno-exceptions -I. -I/opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/include -isystem /opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/packages/eigen-eigen-323c052e1731 "best_deepC/best.cpp" -D_AITS_MAIN -o "best_deepC/best.exe"
[RUNNING COMMAND]
size "best_deepC/best.exe"
   text	   data	    bss	    dec	    hex	filename
 690061	   3784	    760	 694605	  a994d	best_deepC/best.exe
[SUCCESS]
Saved model as executable "best_deepC/best.exe"