Cainvas

Apple Leaf Diseases

Credit: AITS Cainvas Community

Photo by Cvijovic Zarko on Dribbble

In [1]:
# Import all the necessary libraries

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import os
import cv2
import pandas as pd
import random
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix

Unzip the Dataset

In [2]:
!wget 'https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/apple_leaf_2.zip'

!unzip -qo apple_leaf_2.zip
!rm apple_leaf_2.zip
--2021-12-09 15:49:17--  https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/apple_leaf_2.zip
Resolving cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)... 52.219.158.67
Connecting to cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)|52.219.158.67|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 7012286 (6.7M) [application/x-zip-compressed]
Saving to: ‘apple_leaf_2.zip’

apple_leaf_2.zip    100%[===================>]   6.69M  --.-KB/s    in 0.03s   

2021-12-09 15:49:17 (197 MB/s) - ‘apple_leaf_2.zip’ saved [7012286/7012286]

In [3]:
train_path = os.path.join(os.getcwd(), 'apple_leaf')
os.listdir(train_path)
Out[3]:
['Apple Cedar Rust', 'Apple Scab', 'Apple Black Rot']

Plotting Function to Visualize Images

In [4]:
# Get image as numpy array
def load_image(name, path):
    img_path = os.path.join(path, name)
    img = cv2.imread(img_path)
   # img = load_img(img_path, target_size = (256, 256))
    img = img[:,:, ::-1]
    return img

# Plot numpy array
def plot_image(img, name, title):    
    plt.imshow(img)
    plt.suptitle(title)

# Plot a grid of examples
def plot_grid(img_names, img_root,  title ,rows=2, cols=3):
    fig = plt.figure(figsize=(8,8))

    for i,name in enumerate(img_names):      
        fig.add_subplot(rows,cols,i+1)
        img = load_image(name, img_root)
        plt.axis("off")
        plot_image(img, name, title)

    plt.show()
In [5]:
black_rot_path = os.path.join(train_path, "Apple Black Rot")
black_rot_images = os.listdir(black_rot_path)
plot_grid(black_rot_images[-6:], black_rot_path, title = "Apple Black Rot Images")
In [6]:
cedar_rust_path = os.path.join(train_path, "Apple Cedar Rust")
cedar_rust_images = os.listdir(cedar_rust_path)
plot_grid(cedar_rust_images[-6:], cedar_rust_path, title = "Apple Cedar Rust Images")
In [7]:
scab_path = os.path.join(train_path, "Apple Scab")
scab_images = os.listdir(scab_path)
plot_grid(scab_images[-6:], scab_path, title = "Apple Scab")

Define Image Data Generators

In [8]:
train_image_generator = ImageDataGenerator(
    rescale = 1.0/255.0,
    rotation_range = 40,
    width_shift_range = 0.2,
    height_shift_range = 0.2, 
    shear_range = 0.2,
    zoom_range = 0.2,
    horizontal_flip = True,
    validation_split = 0.2
)

valid_image_generator = ImageDataGenerator(
rescale = 1.0/2555.0,
rotation_range = 40,
    width_shift_range = 0.2,
    height_shift_range = 0.2, 
    shear_range = 0.2,
    zoom_range = 0.2,
    horizontal_flip = True)

Defining the training and validation image data generator to expand our dataset of training and validation images. This is done by artificially generating new images by processing the exsisting images. By random flipping, rotating, scaling, and perfroming similar operations, we can create new images.

Loading Image Generators

In [9]:
batch_size = 20

training_images = train_image_generator.flow_from_directory(train_path, 
                                                            target_size = (64,64),
                                                            class_mode = 'categorical', 
                                                            batch_size = batch_size,
                                                            classes = os.listdir(train_path),
                                                            subset = 'training',
                                                            shuffle = True
                                                            #color_mode="grayscale"
                                                           )
validation_images = train_image_generator.flow_from_directory(train_path, 
                                                             target_size = (64,64), 
                                                             class_mode = 'categorical', 
                                                             batch_size = batch_size,
                                                             classes = os.listdir(train_path),
                                                             subset = 'validation',
                                                             shuffle = True
                                                            #color_mode="grayscale"
                                                            )
Found 384 images belonging to 3 classes.
Found 96 images belonging to 3 classes.

Defining Model Architecture

In [10]:
# Defining the architecture for our neural network model
model=Sequential()



model.add(Conv2D(256,(2,2), activation='relu', input_shape=(64,64,3)))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(128,(2,2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64,(2,2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32,(2,2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())

model.add(Dense(150,activation='relu'))
model.add(Dense(100,activation='relu'))
model.add(Dense(3, activation='sigmoid'))
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 63, 63, 256)       3328      
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 31, 31, 256)       0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 30, 30, 128)       131200    
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 15, 15, 128)       0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 14, 14, 64)        32832     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 7, 7, 64)          0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 6, 6, 32)          8224      
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 3, 3, 32)          0         
_________________________________________________________________
flatten (Flatten)            (None, 288)               0         
_________________________________________________________________
dense (Dense)                (None, 150)               43350     
_________________________________________________________________
dense_1 (Dense)              (None, 100)               15100     
_________________________________________________________________
dense_2 (Dense)              (None, 3)                 303       
=================================================================
Total params: 234,337
Trainable params: 234,337
Non-trainable params: 0
_________________________________________________________________
In [11]:
model.compile(loss="categorical_crossentropy",
              optimizer=Adam(learning_rate = 10 ** -3),
              metrics=['accuracy'])

Train the Model

In [12]:
history = model.fit_generator(
    training_images,
    steps_per_epoch = training_images.samples // batch_size,
    validation_data = validation_images, 
    validation_steps = validation_images.samples // batch_size,
    epochs = 70,
    #callbacks = [es]
)
WARNING:tensorflow:From <ipython-input-12-ad12460c4dae>:6: Model.fit_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
Please use Model.fit, which supports generators.
Epoch 1/70
19/19 [==============================] - 16s 843ms/step - loss: 1.0984 - accuracy: 0.3434 - val_loss: 1.0918 - val_accuracy: 0.3375
Epoch 2/70
19/19 [==============================] - 16s 835ms/step - loss: 1.0993 - accuracy: 0.3736 - val_loss: 1.0866 - val_accuracy: 0.3500
Epoch 3/70
19/19 [==============================] - 16s 831ms/step - loss: 1.0923 - accuracy: 0.4313 - val_loss: 1.0825 - val_accuracy: 0.5500
Epoch 4/70
19/19 [==============================] - 16s 830ms/step - loss: 1.0676 - accuracy: 0.3654 - val_loss: 1.0315 - val_accuracy: 0.4000
Epoch 5/70
19/19 [==============================] - 16s 828ms/step - loss: 1.0025 - accuracy: 0.5769 - val_loss: 0.8703 - val_accuracy: 0.5000
Epoch 6/70
19/19 [==============================] - 16s 825ms/step - loss: 0.7526 - accuracy: 0.6813 - val_loss: 0.4956 - val_accuracy: 0.8250
Epoch 7/70
19/19 [==============================] - 16s 826ms/step - loss: 0.6566 - accuracy: 0.7225 - val_loss: 0.5451 - val_accuracy: 0.7875
Epoch 8/70
19/19 [==============================] - 16s 822ms/step - loss: 0.4129 - accuracy: 0.8544 - val_loss: 0.4886 - val_accuracy: 0.8250
Epoch 9/70
19/19 [==============================] - 16s 822ms/step - loss: 0.4238 - accuracy: 0.8462 - val_loss: 0.4217 - val_accuracy: 0.8250
Epoch 10/70
19/19 [==============================] - 16s 825ms/step - loss: 0.3810 - accuracy: 0.8462 - val_loss: 0.5136 - val_accuracy: 0.7750
Epoch 11/70
19/19 [==============================] - 16s 828ms/step - loss: 0.8338 - accuracy: 0.7473 - val_loss: 0.7941 - val_accuracy: 0.6125
Epoch 12/70
19/19 [==============================] - 16s 822ms/step - loss: 0.6429 - accuracy: 0.7747 - val_loss: 0.5473 - val_accuracy: 0.8250
Epoch 13/70
19/19 [==============================] - 16s 825ms/step - loss: 0.4221 - accuracy: 0.8709 - val_loss: 0.3441 - val_accuracy: 0.8750
Epoch 14/70
19/19 [==============================] - 16s 821ms/step - loss: 0.2886 - accuracy: 0.8819 - val_loss: 0.2879 - val_accuracy: 0.8750
Epoch 15/70
19/19 [==============================] - 16s 857ms/step - loss: 0.3779 - accuracy: 0.8421 - val_loss: 0.3761 - val_accuracy: 0.8500
Epoch 16/70
19/19 [==============================] - 16s 822ms/step - loss: 0.3027 - accuracy: 0.9066 - val_loss: 0.2793 - val_accuracy: 0.9125
Epoch 17/70
19/19 [==============================] - 16s 822ms/step - loss: 0.2858 - accuracy: 0.8901 - val_loss: 0.3842 - val_accuracy: 0.8250
Epoch 18/70
19/19 [==============================] - 16s 832ms/step - loss: 0.2590 - accuracy: 0.9038 - val_loss: 0.3164 - val_accuracy: 0.8875
Epoch 19/70
19/19 [==============================] - 16s 819ms/step - loss: 0.2841 - accuracy: 0.9176 - val_loss: 0.2783 - val_accuracy: 0.8875
Epoch 20/70
19/19 [==============================] - 16s 825ms/step - loss: 0.2836 - accuracy: 0.9011 - val_loss: 0.3510 - val_accuracy: 0.8625
Epoch 21/70
19/19 [==============================] - 16s 827ms/step - loss: 0.2290 - accuracy: 0.9148 - val_loss: 0.2584 - val_accuracy: 0.9125
Epoch 22/70
19/19 [==============================] - 16s 828ms/step - loss: 0.2253 - accuracy: 0.9203 - val_loss: 0.2338 - val_accuracy: 0.8875
Epoch 23/70
19/19 [==============================] - 16s 821ms/step - loss: 0.3064 - accuracy: 0.8956 - val_loss: 0.2697 - val_accuracy: 0.9250
Epoch 24/70
19/19 [==============================] - 16s 823ms/step - loss: 0.2929 - accuracy: 0.8984 - val_loss: 0.5497 - val_accuracy: 0.8250
Epoch 25/70
19/19 [==============================] - 16s 821ms/step - loss: 0.4020 - accuracy: 0.8544 - val_loss: 0.3140 - val_accuracy: 0.9000
Epoch 26/70
19/19 [==============================] - 16s 825ms/step - loss: 0.2647 - accuracy: 0.9011 - val_loss: 0.3670 - val_accuracy: 0.8625
Epoch 27/70
19/19 [==============================] - 16s 818ms/step - loss: 0.2715 - accuracy: 0.8984 - val_loss: 0.4031 - val_accuracy: 0.8500
Epoch 28/70
19/19 [==============================] - 16s 829ms/step - loss: 0.2823 - accuracy: 0.9038 - val_loss: 0.3734 - val_accuracy: 0.8625
Epoch 29/70
19/19 [==============================] - 16s 821ms/step - loss: 0.2723 - accuracy: 0.9093 - val_loss: 0.3889 - val_accuracy: 0.8500
Epoch 30/70
19/19 [==============================] - 16s 822ms/step - loss: 0.1974 - accuracy: 0.9313 - val_loss: 0.3625 - val_accuracy: 0.9000
Epoch 31/70
19/19 [==============================] - 16s 817ms/step - loss: 0.2002 - accuracy: 0.9148 - val_loss: 0.3471 - val_accuracy: 0.8875
Epoch 32/70
19/19 [==============================] - 15s 815ms/step - loss: 0.2117 - accuracy: 0.9121 - val_loss: 0.3492 - val_accuracy: 0.8750
Epoch 33/70
19/19 [==============================] - 16s 847ms/step - loss: 0.2407 - accuracy: 0.8974 - val_loss: 0.1768 - val_accuracy: 0.9375
Epoch 34/70
19/19 [==============================] - 15s 813ms/step - loss: 0.2024 - accuracy: 0.9423 - val_loss: 0.2960 - val_accuracy: 0.8750
Epoch 35/70
19/19 [==============================] - 16s 821ms/step - loss: 0.2363 - accuracy: 0.9258 - val_loss: 0.3358 - val_accuracy: 0.8625
Epoch 36/70
19/19 [==============================] - 16s 820ms/step - loss: 0.1955 - accuracy: 0.9423 - val_loss: 0.2198 - val_accuracy: 0.9125
Epoch 37/70
19/19 [==============================] - 16s 856ms/step - loss: 0.1923 - accuracy: 0.9313 - val_loss: 0.2969 - val_accuracy: 0.8875
Epoch 38/70
19/19 [==============================] - 16s 820ms/step - loss: 0.2237 - accuracy: 0.9038 - val_loss: 0.2225 - val_accuracy: 0.9250
Epoch 39/70
19/19 [==============================] - 17s 881ms/step - loss: 0.1615 - accuracy: 0.9396 - val_loss: 0.3498 - val_accuracy: 0.8500
Epoch 40/70
19/19 [==============================] - 16s 848ms/step - loss: 0.3449 - accuracy: 0.8632 - val_loss: 0.2629 - val_accuracy: 0.9125
Epoch 41/70
19/19 [==============================] - 16s 853ms/step - loss: 0.1855 - accuracy: 0.9505 - val_loss: 0.2854 - val_accuracy: 0.9125
Epoch 42/70
19/19 [==============================] - 16s 816ms/step - loss: 0.1453 - accuracy: 0.9588 - val_loss: 0.3146 - val_accuracy: 0.9125
Epoch 43/70
19/19 [==============================] - 16s 823ms/step - loss: 0.1424 - accuracy: 0.9533 - val_loss: 0.1471 - val_accuracy: 0.9500
Epoch 44/70
19/19 [==============================] - 15s 814ms/step - loss: 0.1228 - accuracy: 0.9560 - val_loss: 0.2944 - val_accuracy: 0.9125
Epoch 45/70
19/19 [==============================] - 15s 812ms/step - loss: 0.1787 - accuracy: 0.9396 - val_loss: 0.1875 - val_accuracy: 0.9375
Epoch 46/70
19/19 [==============================] - 16s 821ms/step - loss: 0.1664 - accuracy: 0.9451 - val_loss: 0.2096 - val_accuracy: 0.9250
Epoch 47/70
19/19 [==============================] - 15s 812ms/step - loss: 0.1391 - accuracy: 0.9560 - val_loss: 0.2616 - val_accuracy: 0.9000
Epoch 48/70
19/19 [==============================] - 16s 825ms/step - loss: 0.1547 - accuracy: 0.9478 - val_loss: 0.1645 - val_accuracy: 0.9375
Epoch 49/70
19/19 [==============================] - 15s 815ms/step - loss: 0.1256 - accuracy: 0.9560 - val_loss: 0.1146 - val_accuracy: 0.9750
Epoch 50/70
19/19 [==============================] - 16s 821ms/step - loss: 0.1127 - accuracy: 0.9615 - val_loss: 0.2312 - val_accuracy: 0.9000
Epoch 51/70
19/19 [==============================] - 16s 851ms/step - loss: 0.1499 - accuracy: 0.9478 - val_loss: 0.1073 - val_accuracy: 0.9750
Epoch 52/70
19/19 [==============================] - 16s 821ms/step - loss: 0.1074 - accuracy: 0.9698 - val_loss: 0.1790 - val_accuracy: 0.9500
Epoch 53/70
19/19 [==============================] - 16s 830ms/step - loss: 0.1340 - accuracy: 0.9478 - val_loss: 0.5367 - val_accuracy: 0.8625
Epoch 54/70
19/19 [==============================] - 16s 818ms/step - loss: 0.2115 - accuracy: 0.9258 - val_loss: 0.3202 - val_accuracy: 0.8625
Epoch 55/70
19/19 [==============================] - 15s 813ms/step - loss: 0.1202 - accuracy: 0.9560 - val_loss: 0.1286 - val_accuracy: 0.9625
Epoch 56/70
19/19 [==============================] - 16s 822ms/step - loss: 0.0964 - accuracy: 0.9615 - val_loss: 0.1370 - val_accuracy: 0.9625
Epoch 57/70
19/19 [==============================] - 15s 815ms/step - loss: 0.1739 - accuracy: 0.9341 - val_loss: 0.3869 - val_accuracy: 0.8625
Epoch 58/70
19/19 [==============================] - 16s 820ms/step - loss: 0.1105 - accuracy: 0.9698 - val_loss: 0.1759 - val_accuracy: 0.9250
Epoch 59/70
19/19 [==============================] - 15s 814ms/step - loss: 0.1083 - accuracy: 0.9615 - val_loss: 0.1491 - val_accuracy: 0.9250
Epoch 60/70
19/19 [==============================] - 16s 851ms/step - loss: 0.1220 - accuracy: 0.9505 - val_loss: 0.1844 - val_accuracy: 0.9250
Epoch 61/70
19/19 [==============================] - 16s 819ms/step - loss: 0.0791 - accuracy: 0.9753 - val_loss: 0.3837 - val_accuracy: 0.8625
Epoch 62/70
19/19 [==============================] - 16s 816ms/step - loss: 0.1203 - accuracy: 0.9615 - val_loss: 0.1141 - val_accuracy: 0.9500
Epoch 63/70
19/19 [==============================] - 15s 815ms/step - loss: 0.0923 - accuracy: 0.9698 - val_loss: 0.1209 - val_accuracy: 0.9500
Epoch 64/70
19/19 [==============================] - 16s 817ms/step - loss: 0.0687 - accuracy: 0.9808 - val_loss: 0.1024 - val_accuracy: 0.9500
Epoch 65/70
19/19 [==============================] - 16s 823ms/step - loss: 0.0653 - accuracy: 0.9753 - val_loss: 0.1749 - val_accuracy: 0.9375
Epoch 66/70
19/19 [==============================] - 16s 818ms/step - loss: 0.1003 - accuracy: 0.9698 - val_loss: 0.0722 - val_accuracy: 0.9875
Epoch 67/70
19/19 [==============================] - 16s 819ms/step - loss: 0.1309 - accuracy: 0.9533 - val_loss: 0.1467 - val_accuracy: 0.9625
Epoch 68/70
19/19 [==============================] - 16s 817ms/step - loss: 0.0965 - accuracy: 0.9588 - val_loss: 0.1150 - val_accuracy: 0.9625
Epoch 69/70
19/19 [==============================] - 16s 822ms/step - loss: 0.0568 - accuracy: 0.9808 - val_loss: 0.1698 - val_accuracy: 0.9500
Epoch 70/70
19/19 [==============================] - 16s 817ms/step - loss: 0.1053 - accuracy: 0.9505 - val_loss: 0.1579 - val_accuracy: 0.9375
In [13]:
model.save('best_model.h5')
In [14]:
# Function to plot "accuracy vs epoch" graphs and "loss vs epoch" graphs for training and validation data
def plot_metrics(model_name, metric = 'accuracy'):
    if metric == 'loss':
        plt.title("Loss Values")
        plt.plot(model_name.history['loss'], label = 'train')
        plt.plot(model_name.history['val_loss'], label = 'test')
        plt.legend()
        plt.show()
    else:
        plt.title("Accuracy Values")
        plt.plot(model_name.history['accuracy'], label='train') 
        plt.plot(model_name.history['val_accuracy'], label='test') 
        plt.legend()
        plt.show()
In [15]:
plot_metrics(history, 'accuracy')
plot_metrics(history, 'loss')

Making Predictions

In [16]:
prediction_loss, prediction_accuracy = model.evaluate(validation_images)
print("Prediction Accuracy: ", prediction_accuracy)
5/5 [==============================] - 1s 192ms/step - loss: 0.1235 - accuracy: 0.9583
Prediction Accuracy:  0.9583333134651184
In [17]:
model = load_model('best_model.h5')
In [18]:
def predict(img_path, model):
    image = load_img(img_path)
    image = image.resize((64,64))
    image = img_to_array(image)
    image = np.expand_dims(image, axis = 0)
    pred = model.predict(image)
    #print(pred) 
    pred = np.argmax(pred)
       
    if pred == 0:
        return 'Apple Black Rot'
    elif pred == 1:
        return "Apple Cedar Rust"
    else:
        return "Apple Scab"
In [19]:
img_path = os.path.join (scab_path, random.choice(os.listdir(scab_path)))
#print(img_path)
prediction = predict(img_path, model)
pred = 'Prediction is ' + prediction
img = cv2.imread(img_path)
# Convert Image for BGR to RGB format
img = img[:,:, ::-1]
plt.imshow(img)
plt.grid(False)
plt.axis("off")
plt.title(pred)
plt.suptitle("Actual Disease : Apple Scab")
Out[19]:
Text(0.5, 0.98, 'Actual Disease : Apple Scab')

DeepCC

In [21]:
!deepCC "best_model.h5"
[INFO]
Reading [keras model] 'best_model.h5'
[SUCCESS]
Saved 'best_model_deepC/best_model.onnx'
[INFO]
Reading [onnx model] 'best_model_deepC/best_model.onnx'
[INFO]
Model info:
  ir_vesion : 4
  doc       : 
[WARNING]
[ONNX]: terminal (input/output) conv2d_input's shape is less than 1. Changing it to 1.
[WARNING]
[ONNX]: terminal (input/output) dense_2's shape is less than 1. Changing it to 1.
[INFO]
Running DNNC graph sanity check ...
[SUCCESS]
Passed sanity check.
[INFO]
Writing C++ file 'best_model_deepC/best_model.cpp'
[INFO]
deepSea model files are ready in 'best_model_deepC/' 
[RUNNING COMMAND]
g++ -std=c++11 -O3 -fno-rtti -fno-exceptions -I. -I/opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/include -isystem /opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/packages/eigen-eigen-323c052e1731 "best_model_deepC/best_model.cpp" -D_AITS_MAIN -o "best_model_deepC/best_model.exe"
[RUNNING COMMAND]
size "best_model_deepC/best_model.exe"
   text	   data	    bss	    dec	    hex	filename
1125003	   3960	    760	1129723	 113cfb	best_model_deepC/best_model.exe
[SUCCESS]
Saved model as executable "best_model_deepC/best_model.exe"
In [ ]: