Cainvas

Grape Leaf Colour

Credit: AITS Cainvas Community

Photo by Slava Romanov on Dribbble

In [1]:
# Import all the necessary libraries

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import os
import cv2
import pandas as pd
import random
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix

Download and Unzip the Dataset

In [2]:
!wget 'https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/grape.zip'

!unzip -qo grape.zip
!rm grape.zip
--2021-12-09 09:38:54--  https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/grape.zip
Resolving cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)... 52.219.66.28
Connecting to cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)|52.219.66.28|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 112289427 (107M) [application/x-zip-compressed]
Saving to: ‘grape.zip’

grape.zip           100%[===================>] 107.09M  98.7MB/s    in 1.1s    

2021-12-09 09:38:55 (98.7 MB/s) - ‘grape.zip’ saved [112289427/112289427]
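
If wget and unzip are unavailable, the same download and extraction can be done from Python with the standard library alone; a minimal sketch (same URL as above):

# Pure-Python alternative to the wget/unzip cell above (standard library only).
import urllib.request
import zipfile

url = 'https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/grape.zip'
urllib.request.urlretrieve(url, 'grape.zip')   # download the archive
with zipfile.ZipFile('grape.zip') as zf:
    zf.extractall('.')                         # extract into the current directory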

Training and Validation Paths

In [3]:
train_path = os.path.join(os.getcwd(), 'data', 'train')
validation_path = os.path.join(os.getcwd(), 'data', 'validation')
os.listdir(train_path)
Out[3]:
['red', 'yellow']

Plotting Functions to Visualize the Images

In [4]:
# Get image as numpy array
def load_image(name, path):
    img_path = os.path.join(path, name)
    img = cv2.imread(img_path)
   # img = load_img(img_path, target_size = (256, 256))
    img = img[:,:, ::-1]  # convert BGR (OpenCV default) to RGB for matplotlib
    return img

# Plot numpy array
def plot_image(img, name, title):
    #image_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(img)
    plt.suptitle(title)
    #plt.title(name)

# Plot a grid of examples
def plot_grid(img_names, img_root,  title ,rows=2, cols=3):
    fig = plt.figure(figsize=(8,8))

    for i,name in enumerate(img_names):
        #print(os.path.join(img_root, name))
        fig.add_subplot(rows,cols,i+1)
        img = load_image(name, img_root)
        plt.axis("off")
        plot_image(img, name, title)

    plt.show()
In [5]:
red_path = os.path.join(train_path, "red")
red_images = os.listdir(red_path)
plot_grid(red_images[-6:], red_path, title = "Red Images")
In [6]:
yellow_path = os.path.join(train_path, "yellow")
yellow_images = os.listdir(yellow_path)
plot_grid(yellow_images[-6:], yellow_path, title = "Yellow Images")

Defining the training and validation image data generators to expand our dataset. New images are generated artificially from the existing ones: random flipping, rotation, shifting, shearing, and zooming produce additional training examples, while the validation images are only rescaled.

In [7]:
train_image_generator = ImageDataGenerator(
    rescale = 1.0/255.0,
    rotation_range = 40,
    width_shift_range = 0.2,
    height_shift_range = 0.2, 
    shear_range = 0.2,
    zoom_range = 0.2,
    horizontal_flip = True
)
valid_image_generator = ImageDataGenerator(
    rescale = 1.0/255.0
)
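
To see what the augmentation actually produces, a minimal sketch below pushes a single training image through the generator and plots a few random variants. It assumes the load_image helper and the red_images / red_path variables defined in the plotting cells above:

# Visualize a few random augmentations of a single training image.
sample = load_image(red_images[0], red_path).astype('float32')
sample = np.expand_dims(sample, axis=0)            # ImageDataGenerator.flow expects a batch

fig = plt.figure(figsize=(8, 4))
for i, batch in enumerate(train_image_generator.flow(sample, batch_size=1)):
    fig.add_subplot(1, 4, i + 1)
    plt.imshow(batch[0])                           # rescale has already mapped values to [0, 1]
    plt.axis("off")
    if i == 3:                                     # show four variants, then stop
        break
plt.show()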

Loading the Training and Validation Images From Their Respective Directories

In [8]:
training_images = train_image_generator.flow_from_directory(train_path, 
                                                            target_size = (256,256),
                                                            class_mode = 'categorical', 
                                                            batch_size = 20,
                                                            classes = ['red', 'yellow']
                                                            #color_mode="grayscale"
                                                           )
validation_images = valid_image_generator.flow_from_directory(validation_path, 
                                                             target_size = (256,256), 
                                                             class_mode = 'categorical', 
                                                             batch_size = 20,
                                                             classes = ['red', 'yellow']
                                                            #color_mode="grayscale"
                                                            )
Found 901 images belonging to 2 classes.
Found 100 images belonging to 2 classes.
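
As an optional sanity check, the class-to-index mapping and the shape of one augmented batch can be inspected; a small sketch:

# Confirm the class-to-index mapping and the shape of one batch from the generator.
print(training_images.class_indices)     # expected: {'red': 0, 'yellow': 1}
images, labels = next(training_images)
print(images.shape, labels.shape)        # expected: (20, 256, 256, 3) (20, 2)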

Initializing the Model

In [9]:
# Defining the architecture for our neural network model
model = Sequential()

model.add(Conv2D(128,(3,3), activation='relu', input_shape=(256,256,3)))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64,(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32,(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(16,(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(16,(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))


model.add(Flatten())

model.add(Dense(150,activation='relu'))
model.add(Dense(100,activation='relu'))
model.add(Dense(2, activation='sigmoid'))
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 254, 254, 128)     3584      
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 127, 127, 128)     0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 125, 125, 64)      73792     
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 62, 62, 64)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 60, 60, 32)        18464     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 30, 30, 32)        0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 28, 28, 16)        4624      
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 14, 14, 16)        0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 12, 12, 16)        2320      
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 6, 6, 16)          0         
_________________________________________________________________
flatten (Flatten)            (None, 576)               0         
_________________________________________________________________
dense (Dense)                (None, 150)               86550     
_________________________________________________________________
dense_1 (Dense)              (None, 100)               15100     
_________________________________________________________________
dense_2 (Dense)              (None, 2)                 202       
=================================================================
Total params: 204,636
Trainable params: 204,636
Non-trainable params: 0
_________________________________________________________________
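
The spatial sizes in the summary follow directly from the hyperparameters: each unpadded 3x3 convolution trims 2 pixels from the height and width, and each 2x2 max-pooling halves them (rounding down). A quick check reproduces the 6x6x16 = 576 features seen by the Flatten layer:

# Reproduce the feature-map sizes reported by model.summary().
size = 256
for _ in range(5):                 # five Conv2D + MaxPooling2D pairs
    size = (size - 2) // 2         # 3x3 conv (valid padding), then 2x2 pooling
print(size, size * size * 16)      # 6 576 -> matches the Flatten layer's 576 units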
In [10]:
model.compile(loss="categorical_crossentropy",
              optimizer=Adam(learning_rate = 10 ** -5),
              metrics=['accuracy'])
In [11]:
# Defining an EarlyStopping callback to monitor validation loss
es = EarlyStopping(monitor='val_loss', patience=5)
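
A variant worth noting (not what was run here): EarlyStopping can also restore the best weights seen so far, and a ModelCheckpoint callback can save them to disk as they improve.

# Optional variant: keep the best weights in memory and/or on disk.
from tensorflow.keras.callbacks import ModelCheckpoint

es = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
checkpoint = ModelCheckpoint('best_model.h5', monitor='val_loss', save_best_only=True)
# pass callbacks=[es, checkpoint] to model.fit below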

Model Training

In [12]:
# Training the model
history = model.fit_generator(training_images, 
                              steps_per_epoch = 15, 
                              epochs = 50, 
                              validation_data= validation_images, 
                              validation_steps = 1,
                              callbacks = [es]
                                          )
WARNING:tensorflow:From <ipython-input-12-c85c19e3658f>:7: Model.fit_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
Please use Model.fit, which supports generators.
Epoch 1/50
15/15 [==============================] - 115s 8s/step - loss: 0.6918 - accuracy: 0.4833 - val_loss: 0.6925 - val_accuracy: 0.5000
Epoch 2/50
15/15 [==============================] - 116s 8s/step - loss: 0.6898 - accuracy: 0.5267 - val_loss: 0.6872 - val_accuracy: 0.7000
Epoch 3/50
15/15 [==============================] - 107s 7s/step - loss: 0.6860 - accuracy: 0.6655 - val_loss: 0.6840 - val_accuracy: 0.8000
Epoch 4/50
15/15 [==============================] - 115s 8s/step - loss: 0.6807 - accuracy: 0.8033 - val_loss: 0.6802 - val_accuracy: 0.8000
Epoch 5/50
15/15 [==============================] - 114s 8s/step - loss: 0.6776 - accuracy: 0.8500 - val_loss: 0.6732 - val_accuracy: 0.8000
Epoch 6/50
15/15 [==============================] - 115s 8s/step - loss: 0.6696 - accuracy: 0.9110 - val_loss: 0.6571 - val_accuracy: 0.9500
Epoch 7/50
15/15 [==============================] - 114s 8s/step - loss: 0.6607 - accuracy: 0.9333 - val_loss: 0.6512 - val_accuracy: 1.0000
Epoch 8/50
15/15 [==============================] - 107s 7s/step - loss: 0.6504 - accuracy: 0.9680 - val_loss: 0.6342 - val_accuracy: 1.0000
Epoch 9/50
15/15 [==============================] - 114s 8s/step - loss: 0.6384 - accuracy: 0.9833 - val_loss: 0.6281 - val_accuracy: 0.9500
Epoch 10/50
15/15 [==============================] - 114s 8s/step - loss: 0.6161 - accuracy: 0.9900 - val_loss: 0.6102 - val_accuracy: 1.0000
Epoch 11/50
15/15 [==============================] - 116s 8s/step - loss: 0.5960 - accuracy: 0.9867 - val_loss: 0.6052 - val_accuracy: 0.9500
Epoch 12/50
15/15 [==============================] - 116s 8s/step - loss: 0.5678 - accuracy: 0.9833 - val_loss: 0.5614 - val_accuracy: 1.0000
Epoch 13/50
15/15 [==============================] - 115s 8s/step - loss: 0.5443 - accuracy: 0.9867 - val_loss: 0.5219 - val_accuracy: 1.0000
Epoch 14/50
15/15 [==============================] - 107s 7s/step - loss: 0.5058 - accuracy: 0.9929 - val_loss: 0.4828 - val_accuracy: 1.0000
Epoch 15/50
15/15 [==============================] - 107s 7s/step - loss: 0.4627 - accuracy: 1.0000 - val_loss: 0.4103 - val_accuracy: 1.0000
Epoch 16/50
15/15 [==============================] - 107s 7s/step - loss: 0.4094 - accuracy: 1.0000 - val_loss: 0.3351 - val_accuracy: 1.0000
Epoch 17/50
15/15 [==============================] - 115s 8s/step - loss: 0.3660 - accuracy: 1.0000 - val_loss: 0.3066 - val_accuracy: 1.0000
Epoch 18/50
15/15 [==============================] - 107s 7s/step - loss: 0.3078 - accuracy: 0.9964 - val_loss: 0.2382 - val_accuracy: 1.0000
Epoch 19/50
15/15 [==============================] - 115s 8s/step - loss: 0.2499 - accuracy: 0.9967 - val_loss: 0.2063 - val_accuracy: 1.0000
Epoch 20/50
15/15 [==============================] - 115s 8s/step - loss: 0.2004 - accuracy: 0.9967 - val_loss: 0.1556 - val_accuracy: 1.0000
Epoch 21/50
15/15 [==============================] - 115s 8s/step - loss: 0.1503 - accuracy: 1.0000 - val_loss: 0.0862 - val_accuracy: 1.0000
Epoch 22/50
15/15 [==============================] - 115s 8s/step - loss: 0.1201 - accuracy: 1.0000 - val_loss: 0.0758 - val_accuracy: 1.0000
Epoch 23/50
15/15 [==============================] - 107s 7s/step - loss: 0.0879 - accuracy: 1.0000 - val_loss: 0.0755 - val_accuracy: 1.0000
Epoch 24/50
15/15 [==============================] - 107s 7s/step - loss: 0.0685 - accuracy: 1.0000 - val_loss: 0.0371 - val_accuracy: 1.0000
Epoch 25/50
15/15 [==============================] - 115s 8s/step - loss: 0.0579 - accuracy: 0.9933 - val_loss: 0.0370 - val_accuracy: 1.0000
Epoch 26/50
15/15 [==============================] - 115s 8s/step - loss: 0.0496 - accuracy: 1.0000 - val_loss: 0.0326 - val_accuracy: 1.0000
Epoch 27/50
15/15 [==============================] - 111s 7s/step - loss: 0.0404 - accuracy: 0.9964 - val_loss: 0.0171 - val_accuracy: 1.0000
Epoch 28/50
15/15 [==============================] - 120s 8s/step - loss: 0.0287 - accuracy: 1.0000 - val_loss: 0.0256 - val_accuracy: 1.0000
Epoch 29/50
15/15 [==============================] - 119s 8s/step - loss: 0.0244 - accuracy: 1.0000 - val_loss: 0.0276 - val_accuracy: 1.0000
Epoch 30/50
15/15 [==============================] - 119s 8s/step - loss: 0.0254 - accuracy: 0.9967 - val_loss: 0.0239 - val_accuracy: 1.0000
Epoch 31/50
15/15 [==============================] - 115s 8s/step - loss: 0.0179 - accuracy: 1.0000 - val_loss: 0.0131 - val_accuracy: 1.0000
Epoch 32/50
15/15 [==============================] - 115s 8s/step - loss: 0.0209 - accuracy: 0.9967 - val_loss: 0.0553 - val_accuracy: 0.9500
Epoch 33/50
15/15 [==============================] - 111s 7s/step - loss: 0.0181 - accuracy: 1.0000 - val_loss: 0.0287 - val_accuracy: 1.0000
Epoch 34/50
15/15 [==============================] - 113s 8s/step - loss: 0.0134 - accuracy: 1.0000 - val_loss: 0.0501 - val_accuracy: 1.0000
Epoch 35/50
15/15 [==============================] - 113s 8s/step - loss: 0.0123 - accuracy: 1.0000 - val_loss: 0.0305 - val_accuracy: 1.0000
Epoch 36/50
15/15 [==============================] - 113s 8s/step - loss: 0.0116 - accuracy: 1.0000 - val_loss: 0.0073 - val_accuracy: 1.0000
Epoch 37/50
15/15 [==============================] - 106s 7s/step - loss: 0.0107 - accuracy: 1.0000 - val_loss: 0.0239 - val_accuracy: 1.0000
Epoch 38/50
15/15 [==============================] - 113s 8s/step - loss: 0.0087 - accuracy: 1.0000 - val_loss: 0.0043 - val_accuracy: 1.0000
Epoch 39/50
15/15 [==============================] - 105s 7s/step - loss: 0.0099 - accuracy: 1.0000 - val_loss: 0.0145 - val_accuracy: 1.0000
Epoch 40/50
15/15 [==============================] - 113s 8s/step - loss: 0.0152 - accuracy: 0.9933 - val_loss: 0.0049 - val_accuracy: 1.0000
Epoch 41/50
15/15 [==============================] - 113s 8s/step - loss: 0.0094 - accuracy: 1.0000 - val_loss: 0.0025 - val_accuracy: 1.0000
Epoch 42/50
15/15 [==============================] - 113s 8s/step - loss: 0.0097 - accuracy: 1.0000 - val_loss: 0.0083 - val_accuracy: 1.0000
Epoch 43/50
15/15 [==============================] - 106s 7s/step - loss: 0.0146 - accuracy: 0.9964 - val_loss: 0.0027 - val_accuracy: 1.0000
Epoch 44/50
15/15 [==============================] - 113s 8s/step - loss: 0.0084 - accuracy: 1.0000 - val_loss: 0.0032 - val_accuracy: 1.0000
Epoch 45/50
15/15 [==============================] - 114s 8s/step - loss: 0.0053 - accuracy: 1.0000 - val_loss: 0.0028 - val_accuracy: 1.0000
Epoch 46/50
15/15 [==============================] - 106s 7s/step - loss: 0.0068 - accuracy: 1.0000 - val_loss: 0.0029 - val_accuracy: 1.0000
In [13]:
model.save('best_model.h5')
In [14]:
# Function to plot "accuracy vs epoch" graphs and "loss vs epoch" graphs for training and validation data
def plot_metrics(model_name, metric = 'accuracy'):
    if metric == 'loss':
        plt.title("Loss Values")
        plt.plot(model_name.history['loss'], label = 'train')
        plt.plot(model_name.history['val_loss'], label = 'validation')
        plt.legend()
        plt.show()
    else:
        plt.title("Accuracy Values")
        plt.plot(model_name.history['accuracy'], label='train') 
        plt.plot(model_name.history['val_accuracy'], label='validation')
        plt.legend()
        plt.show()
In [15]:
plot_metrics(history, 'accuracy')
plot_metrics(history, 'loss')

Evaluating and Predicting with our Model

In [16]:
prediction_loss, prediction_accuracy = model.evaluate(validation_images)
print("Prediction Accuracy: ", prediction_accuracy)
5/5 [==============================] - 9s 2s/step - loss: 0.0095 - accuracy: 1.0000
Prediction Accuracy:  1.0
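
Since confusion_matrix was imported earlier, a per-class breakdown on the validation set can also be computed. A minimal sketch; note that a fresh, unshuffled generator is needed so the predictions line up with the stored labels:

# Confusion matrix on the validation set (requires an unshuffled generator).
eval_images = valid_image_generator.flow_from_directory(validation_path,
                                                        target_size=(256, 256),
                                                        class_mode='categorical',
                                                        batch_size=20,
                                                        classes=['red', 'yellow'],
                                                        shuffle=False)
y_pred = np.argmax(model.predict(eval_images), axis=1)
y_true = eval_images.classes
print(confusion_matrix(y_true, y_pred))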
In [17]:
model = load_model('best_model.h5')

Randomly Selecting a Red and a Yellow Image from the Dataset to Predict Their Colour

In [18]:
def predict(img_path, model):
    image = load_img(img_path, target_size = (256, 256))
    image = img_to_array(image)
    image = image / 255.0                  # rescale to match the training preprocessing
    image = np.expand_dims(image, axis = 0)
    pred = np.argmax(model.predict(image))
    if pred == 0:
        return 'Red'
    return 'Yellow' 
In [19]:
img_path = os.path.join(red_path, random.choice(os.listdir(red_path)))
prediction = predict(img_path, model)
pred = 'Prediction is ' + prediction
img = cv2.imread(img_path)
img = img[:,:, ::-1]
plt.imshow(img)
plt.grid(False)
plt.axis("off")
plt.title(pred)
plt.suptitle("Actual Colour : Red")
Out[19]:
Text(0.5, 0.98, 'Actual Colour : Red')
In [20]:
img_path = os.path.join(yellow_path, random.choice(os.listdir(yellow_path)))
prediction = predict(img_path, model)
pred = 'Prediction is ' + prediction
img = cv2.imread(img_path)
img = img[:,:, ::-1]
plt.imshow(img)
plt.grid(False)
plt.axis("off")
plt.title(pred)
plt.suptitle("Actual Colour : Yellow")
Out[20]:
Text(0.5, 0.98, 'Actual Colour : Yellow')
In [21]:
!deepCC 'best_model.h5'
[INFO]
Reading [keras model] 'best_model.h5'
[SUCCESS]
Saved 'best_model_deepC/best_model.onnx'
[INFO]
Reading [onnx model] 'best_model_deepC/best_model.onnx'
[INFO]
Model info:
  ir_vesion : 4
  doc       : 
[WARNING]
[ONNX]: terminal (input/output) conv2d_input's shape is less than 1. Changing it to 1.
[WARNING]
[ONNX]: terminal (input/output) dense_2's shape is less than 1. Changing it to 1.
[INFO]
Running DNNC graph sanity check ...
[SUCCESS]
Passed sanity check.
[INFO]
Writing C++ file 'best_model_deepC/best_model.cpp'
[INFO]
deepSea model files are ready in 'best_model_deepC/' 
[RUNNING COMMAND]
g++ -std=c++11 -O3 -fno-rtti -fno-exceptions -I. -I/opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/include -isystem /opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/packages/eigen-eigen-323c052e1731 "best_model_deepC/best_model.cpp" -D_AITS_MAIN -o "best_model_deepC/best_model.exe"
[RUNNING COMMAND]
size "best_model_deepC/best_model.exe"
   text	   data	    bss	    dec	    hex	filename
1008437	   3952	    760	1013149	  f759d	best_model_deepC/best_model.exe
[SUCCESS]
Saved model as executable "best_model_deepC/best_model.exe"
In [ ]: