Cainvas

Potato Weed Plant Detection

Credit: AITS Cainvas Community

Photo by Michelle Porucznik on Dribbble

Import all the Necessary Libraries

In [1]:
# Import all the necessary libraries

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import os
import cv2
import pandas as pd
import random
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

Unzip the Dataset

In [2]:
!wget 'https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/potato-weed.zip'

!unzip -qo potato-weed.zip
!rm potato-weed.zip
--2021-12-09 16:51:14--  https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/potato-weed.zip
Resolving cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)... 52.219.66.104
Connecting to cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)|52.219.66.104|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 10165341 (9.7M) [application/x-zip-compressed]
Saving to: ‘potato-weed.zip’

potato-weed.zip     100%[===================>]   9.69M  --.-KB/s    in 0.07s   

2021-12-09 16:51:14 (139 MB/s) - ‘potato-weed.zip’ saved [10165341/10165341]

Archive:  potato-weed.zip
  inflating: Dataset/train/potato/1.jpg  
  inflating: Dataset/train/potato/10.jpg  
  inflating: Dataset/train/potato/100.jpg  
  inflating: Dataset/train/potato/101.jpg  
  inflating: Dataset/train/potato/102.jpg  
  inflating: Dataset/train/potato/103.jpg  
  inflating: Dataset/train/potato/104.jpg  
  inflating: Dataset/train/potato/105.jpg  
  inflating: Dataset/train/potato/106.jpg  
  inflating: Dataset/train/potato/107.jpg  
  inflating: Dataset/train/potato/108.jpg  
  inflating: Dataset/train/potato/109.jpg  
  inflating: Dataset/train/potato/11.jpg  
  inflating: Dataset/train/potato/110.jpg  
  inflating: Dataset/train/potato/111.jpg  
  inflating: Dataset/train/potato/112.jpg  
  inflating: Dataset/train/potato/113.jpg  
  inflating: Dataset/train/potato/114.jpg  
  inflating: Dataset/train/potato/115.jpg  
  inflating: Dataset/train/potato/116.jpg  
  inflating: Dataset/train/potato/117.jpg  
  inflating: Dataset/train/potato/118.jpg  
  inflating: Dataset/train/potato/119.jpg  
  inflating: Dataset/train/potato/12.jpg  
  inflating: Dataset/train/potato/120.jpg  
  inflating: Dataset/train/potato/121.jpg  
  inflating: Dataset/train/potato/122.jpg  
  inflating: Dataset/train/potato/123.jpg  
  inflating: Dataset/train/potato/124.jpg  
  inflating: Dataset/train/potato/125.jpg  
  inflating: Dataset/train/potato/126.jpg  
  inflating: Dataset/train/potato/127.jpg  
  inflating: Dataset/train/potato/128.jpg  
  inflating: Dataset/train/potato/129.jpg  
  inflating: Dataset/train/potato/13.jpg  
  inflating: Dataset/train/potato/130.jpg  
  inflating: Dataset/train/potato/131.jpg  
  inflating: Dataset/train/potato/132.jpg  
  inflating: Dataset/train/potato/133.jpg  
  inflating: Dataset/train/potato/134.jpg  
  inflating: Dataset/train/potato/135.jpg  
  inflating: Dataset/train/potato/136.jpg  
  inflating: Dataset/train/potato/137.jpg  
  inflating: Dataset/train/potato/138.jpg  
  inflating: Dataset/train/potato/14.jpg  
  inflating: Dataset/train/potato/15.jpg  
  inflating: Dataset/train/potato/16.jpg  
  inflating: Dataset/train/potato/17.jpg  
  inflating: Dataset/train/potato/18.jpg  
  inflating: Dataset/train/potato/19.jpg  
  inflating: Dataset/train/potato/2.jpg  
  inflating: Dataset/train/potato/20.jpg  
  inflating: Dataset/train/potato/21.jpg  
  inflating: Dataset/train/potato/22.jpg  
  inflating: Dataset/train/potato/23.jpg  
  inflating: Dataset/train/potato/24.jpg  
  inflating: Dataset/train/potato/25.jpg  
  inflating: Dataset/train/potato/26.jpg  
  inflating: Dataset/train/potato/27.jpg  
  inflating: Dataset/train/potato/28.jpg  
  inflating: Dataset/train/potato/29.jpg  
  inflating: Dataset/train/potato/3.jpg  
  inflating: Dataset/train/potato/30.jpg  
  inflating: Dataset/train/potato/31.jpg  
  inflating: Dataset/train/potato/32.jpg  
  inflating: Dataset/train/potato/33.jpg  
  inflating: Dataset/train/potato/34.jpg  
  inflating: Dataset/train/potato/35.jpg  
  inflating: Dataset/train/potato/36.jpg  
  inflating: Dataset/train/potato/37.jpg  
  inflating: Dataset/train/potato/38.jpg  
  inflating: Dataset/train/potato/39.jpg  
  inflating: Dataset/train/potato/4.jpg  
  inflating: Dataset/train/potato/40.jpg  
  inflating: Dataset/train/potato/41.jpg  
  inflating: Dataset/train/potato/42.jpg  
  inflating: Dataset/train/potato/43.jpg  
  inflating: Dataset/train/potato/44.jpg  
  inflating: Dataset/train/potato/45.jpg  
  inflating: Dataset/train/potato/46.jpg  
  inflating: Dataset/train/potato/47.jpg  
  inflating: Dataset/train/potato/48.jpg  
  inflating: Dataset/train/potato/49.jpg  
  inflating: Dataset/train/potato/5.jpg  
  inflating: Dataset/train/potato/50.jpg  
  inflating: Dataset/train/potato/51.jpg  
  inflating: Dataset/train/potato/52.jpg  
  inflating: Dataset/train/potato/53.jpg  
  inflating: Dataset/train/potato/54.jpg  
  inflating: Dataset/train/potato/55.jpg  
  inflating: Dataset/train/potato/56.jpg  
  inflating: Dataset/train/potato/57.jpg  
  inflating: Dataset/train/potato/58.jpg  
  inflating: Dataset/train/potato/59.jpg  
  inflating: Dataset/train/potato/6.jpg  
  inflating: Dataset/train/potato/60.jpg  
  inflating: Dataset/train/potato/61.jpg  
  inflating: Dataset/train/potato/62.jpg  
  inflating: Dataset/train/potato/63.jpg  
  inflating: Dataset/train/potato/64.jpg  
  inflating: Dataset/train/potato/65.jpg  
  inflating: Dataset/train/potato/66.jpg  
  inflating: Dataset/train/potato/67.jpg  
  inflating: Dataset/train/potato/68.jpg  
  inflating: Dataset/train/potato/69.jpg  
  inflating: Dataset/train/potato/7.jpg  
  inflating: Dataset/train/potato/70.jpg  
  inflating: Dataset/train/potato/71.jpg  
  inflating: Dataset/train/potato/72.jpg  
  inflating: Dataset/train/potato/73.jpg  
  inflating: Dataset/train/potato/74.jpg  
  inflating: Dataset/train/potato/75.jpg  
  inflating: Dataset/train/potato/76.jpg  
  inflating: Dataset/train/potato/77.jpg  
  inflating: Dataset/train/potato/78.jpg  
  inflating: Dataset/train/potato/79.jpg  
  inflating: Dataset/train/potato/8.jpg  
  inflating: Dataset/train/potato/80.jpg  
  inflating: Dataset/train/potato/81.jpg  
  inflating: Dataset/train/potato/82.jpg  
  inflating: Dataset/train/potato/83.jpg  
  inflating: Dataset/train/potato/84.jpg  
  inflating: Dataset/train/potato/85.jpg  
  inflating: Dataset/train/potato/86.jpg  
  inflating: Dataset/train/potato/87.jpg  
  inflating: Dataset/train/potato/88.jpg  
  inflating: Dataset/train/potato/89.jpg  
  inflating: Dataset/train/potato/9.jpg  
  inflating: Dataset/train/potato/90.jpg  
  inflating: Dataset/train/potato/91.jpg  
  inflating: Dataset/train/potato/92.jpg  
  inflating: Dataset/train/potato/93.jpg  
  inflating: Dataset/train/potato/94.jpg  
  inflating: Dataset/train/potato/95.jpg  
  inflating: Dataset/train/potato/96.jpg  
  inflating: Dataset/train/potato/97.jpg  
  inflating: Dataset/train/potato/98.jpg  
  inflating: Dataset/train/potato/99.jpg  
  inflating: Dataset/train/weed/1.jpg  
  inflating: Dataset/train/weed/10.jpg  
  inflating: Dataset/train/weed/100.jpg  
  inflating: Dataset/train/weed/101.jpg  
  inflating: Dataset/train/weed/102.jpg  
  inflating: Dataset/train/weed/103.jpg  
  inflating: Dataset/train/weed/104.jpg  
  inflating: Dataset/train/weed/105.jpg  
  inflating: Dataset/train/weed/106.jpg  
  inflating: Dataset/train/weed/107.jpg  
  inflating: Dataset/train/weed/108.jpg  
  inflating: Dataset/train/weed/109.jpg  
  inflating: Dataset/train/weed/11.jpg  
  inflating: Dataset/train/weed/110.jpg  
  inflating: Dataset/train/weed/111.jpg  
  inflating: Dataset/train/weed/112.jpg  
  inflating: Dataset/train/weed/113.jpg  
  inflating: Dataset/train/weed/114.jpg  
  inflating: Dataset/train/weed/115.jpg  
  inflating: Dataset/train/weed/116.jpg  
  inflating: Dataset/train/weed/117.jpg  
  inflating: Dataset/train/weed/118.jpg  
  inflating: Dataset/train/weed/119.jpg  
  inflating: Dataset/train/weed/12.jpg  
  inflating: Dataset/train/weed/120.jpg  
  inflating: Dataset/train/weed/121.jpg  
  inflating: Dataset/train/weed/122.jpg  
  inflating: Dataset/train/weed/123.jpg  
  inflating: Dataset/train/weed/124.jpg  
  inflating: Dataset/train/weed/125.jpg  
  inflating: Dataset/train/weed/126.jpg  
  inflating: Dataset/train/weed/127.jpg  
  inflating: Dataset/train/weed/128.jpg  
  inflating: Dataset/train/weed/129.jpg  
  inflating: Dataset/train/weed/13.jpg  
  inflating: Dataset/train/weed/130.jpg  
  inflating: Dataset/train/weed/131.jpg  
  inflating: Dataset/train/weed/132.jpg  
  inflating: Dataset/train/weed/133.jpg  
  inflating: Dataset/train/weed/134.jpg  
  inflating: Dataset/train/weed/135.jpg  
  inflating: Dataset/train/weed/136.jpg  
  inflating: Dataset/train/weed/137.jpg  
  inflating: Dataset/train/weed/138.jpg  
  inflating: Dataset/train/weed/139.jpg  
  inflating: Dataset/train/weed/14.jpg  
  inflating: Dataset/train/weed/140.jpg  
  inflating: Dataset/train/weed/141.jpg  
  inflating: Dataset/train/weed/142.jpg  
  inflating: Dataset/train/weed/143.jpg  
  inflating: Dataset/train/weed/144.jpg  
  inflating: Dataset/train/weed/145.jpg  
  inflating: Dataset/train/weed/146.jpg  
  inflating: Dataset/train/weed/147.jpg  
  inflating: Dataset/train/weed/148.jpg  
  inflating: Dataset/train/weed/149.jpg  
  inflating: Dataset/train/weed/15.jpg  
  inflating: Dataset/train/weed/150.jpg  
  inflating: Dataset/train/weed/151.jpg  
  inflating: Dataset/train/weed/152.jpg  
  inflating: Dataset/train/weed/153.jpg  
  inflating: Dataset/train/weed/154.jpg  
  inflating: Dataset/train/weed/155.jpg  
  inflating: Dataset/train/weed/156.jpg  
  inflating: Dataset/train/weed/157.jpg  
  inflating: Dataset/train/weed/158.jpg  
  inflating: Dataset/train/weed/159.jpg  
  inflating: Dataset/train/weed/16.jpg  
  inflating: Dataset/train/weed/160.jpg  
  inflating: Dataset/train/weed/161.jpg  
  inflating: Dataset/train/weed/162.jpg  
  inflating: Dataset/train/weed/163.jpg  
  inflating: Dataset/train/weed/164.jpg  
  inflating: Dataset/train/weed/165.jpg  
  inflating: Dataset/train/weed/166.jpg  
  inflating: Dataset/train/weed/167.jpg  
  inflating: Dataset/train/weed/168.jpg  
  inflating: Dataset/train/weed/169.jpg  
  inflating: Dataset/train/weed/17.jpg  
  inflating: Dataset/train/weed/170.jpg  
  inflating: Dataset/train/weed/171.jpg  
  inflating: Dataset/train/weed/172.jpg  
  inflating: Dataset/train/weed/173.jpg  
  inflating: Dataset/train/weed/174.jpg  
  inflating: Dataset/train/weed/175.jpg  
  inflating: Dataset/train/weed/176.jpg  
  inflating: Dataset/train/weed/177.jpg  
  inflating: Dataset/train/weed/178.jpg  
  inflating: Dataset/train/weed/179.jpg  
  inflating: Dataset/train/weed/18.jpg  
  inflating: Dataset/train/weed/180.jpg  
  inflating: Dataset/train/weed/181.jpg  
  inflating: Dataset/train/weed/182.jpg  
  inflating: Dataset/train/weed/183.jpg  
  inflating: Dataset/train/weed/184.jpg  
  inflating: Dataset/train/weed/185.jpg  
  inflating: Dataset/train/weed/186.jpg  
  inflating: Dataset/train/weed/187.jpg  
  inflating: Dataset/train/weed/188.jpg  
  inflating: Dataset/train/weed/189.jpg  
  inflating: Dataset/train/weed/19.jpg  
  inflating: Dataset/train/weed/190.jpg  
  inflating: Dataset/train/weed/191.jpg  
  inflating: Dataset/train/weed/192.jpg  
  inflating: Dataset/train/weed/193.jpg  
  inflating: Dataset/train/weed/194.jpg  
  inflating: Dataset/train/weed/195.jpg  
  inflating: Dataset/train/weed/196.jpg  
  inflating: Dataset/train/weed/197.jpg  
  inflating: Dataset/train/weed/198.jpg  
  inflating: Dataset/train/weed/2.jpg  
  inflating: Dataset/train/weed/20.jpg  
  inflating: Dataset/train/weed/21.jpg  
  inflating: Dataset/train/weed/22.jpg  
  inflating: Dataset/train/weed/23.jpg  
  inflating: Dataset/train/weed/24.jpg  
  inflating: Dataset/train/weed/25.jpg  
  inflating: Dataset/train/weed/26.jpg  
  inflating: Dataset/train/weed/27.jpg  
  inflating: Dataset/train/weed/28.jpg  
  inflating: Dataset/train/weed/29.jpg  
  inflating: Dataset/train/weed/3.jpg  
  inflating: Dataset/train/weed/30.jpg  
  inflating: Dataset/train/weed/31.jpg  
  inflating: Dataset/train/weed/32.jpg  
  inflating: Dataset/train/weed/33.jpg  
  inflating: Dataset/train/weed/34.jpg  
  inflating: Dataset/train/weed/35.jpg  
  inflating: Dataset/train/weed/36.jpg  
  inflating: Dataset/train/weed/37.jpg  
  inflating: Dataset/train/weed/38.jpg  
  inflating: Dataset/train/weed/39.jpg  
  inflating: Dataset/train/weed/4.jpg  
  inflating: Dataset/train/weed/40.jpg  
  inflating: Dataset/train/weed/41.jpg  
  inflating: Dataset/train/weed/42.jpg  
  inflating: Dataset/train/weed/43.jpg  
  inflating: Dataset/train/weed/44.jpg  
  inflating: Dataset/train/weed/45.jpg  
  inflating: Dataset/train/weed/46.jpg  
  inflating: Dataset/train/weed/47.jpg  
  inflating: Dataset/train/weed/48.jpg  
  inflating: Dataset/train/weed/49.jpg  
  inflating: Dataset/train/weed/5.jpg  
  inflating: Dataset/train/weed/50.jpg  
  inflating: Dataset/train/weed/51.jpg  
  inflating: Dataset/train/weed/52.jpg  
  inflating: Dataset/train/weed/53.jpg  
  inflating: Dataset/train/weed/54.jpg  
  inflating: Dataset/train/weed/55.jpg  
  inflating: Dataset/train/weed/56.jpg  
  inflating: Dataset/train/weed/57.jpg  
  inflating: Dataset/train/weed/58.jpg  
  inflating: Dataset/train/weed/59.jpg  
  inflating: Dataset/train/weed/6.jpg  
  inflating: Dataset/train/weed/60.jpg  
  inflating: Dataset/train/weed/61.jpg  
  inflating: Dataset/train/weed/62.jpg  
  inflating: Dataset/train/weed/63.jpg  
  inflating: Dataset/train/weed/64.jpg  
  inflating: Dataset/train/weed/65.jpg  
  inflating: Dataset/train/weed/66.jpg  
  inflating: Dataset/train/weed/67.jpg  
  inflating: Dataset/train/weed/68.jpg  
  inflating: Dataset/train/weed/69.jpg  
  inflating: Dataset/train/weed/7.jpg  
  inflating: Dataset/train/weed/70.jpg  
  inflating: Dataset/train/weed/71.jpg  
  inflating: Dataset/train/weed/72.jpg  
  inflating: Dataset/train/weed/73.jpg  
  inflating: Dataset/train/weed/74.jpg  
  inflating: Dataset/train/weed/75.jpg  
  inflating: Dataset/train/weed/76.jpg  
  inflating: Dataset/train/weed/77.jpg  
  inflating: Dataset/train/weed/78.jpg  
  inflating: Dataset/train/weed/79.jpg  
  inflating: Dataset/train/weed/8.jpg  
  inflating: Dataset/train/weed/80.jpg  
  inflating: Dataset/train/weed/81.jpg  
  inflating: Dataset/train/weed/82.jpg  
  inflating: Dataset/train/weed/83.jpg  
  inflating: Dataset/train/weed/84.jpg  
  inflating: Dataset/train/weed/85.jpg  
  inflating: Dataset/train/weed/86.jpg  
  inflating: Dataset/train/weed/87.jpg  
  inflating: Dataset/train/weed/88.jpg  
  inflating: Dataset/train/weed/89.jpg  
  inflating: Dataset/train/weed/9.jpg  
  inflating: Dataset/train/weed/90.jpg  
  inflating: Dataset/train/weed/91.jpg  
  inflating: Dataset/train/weed/92.jpg  
  inflating: Dataset/train/weed/93.jpg  
  inflating: Dataset/train/weed/94.jpg  
  inflating: Dataset/train/weed/95.jpg  
  inflating: Dataset/train/weed/96.jpg  
  inflating: Dataset/train/weed/97.jpg  
  inflating: Dataset/train/weed/98.jpg  
  inflating: Dataset/train/weed/99.jpg  
  inflating: Dataset/val/potato/139.jpg  
  inflating: Dataset/val/potato/140.jpg  
  inflating: Dataset/val/potato/141.jpg  
  inflating: Dataset/val/potato/142.jpg  
  inflating: Dataset/val/potato/143.jpg  
  inflating: Dataset/val/potato/144.jpg  
  inflating: Dataset/val/potato/145.jpg  
  inflating: Dataset/val/potato/146.jpg  
  inflating: Dataset/val/potato/147.jpg  
  inflating: Dataset/val/potato/148.jpg  
  inflating: Dataset/val/potato/149.jpg  
  inflating: Dataset/val/potato/150.jpg  
  inflating: Dataset/val/potato/151.jpg  
  inflating: Dataset/val/potato/152.jpg  
  inflating: Dataset/val/potato/153.jpg  
  inflating: Dataset/val/potato/154.jpg  
  inflating: Dataset/val/potato/155.jpg  
  inflating: Dataset/val/potato/156.jpg  
  inflating: Dataset/val/potato/157.jpg  
  inflating: Dataset/val/potato/158.jpg  
  inflating: Dataset/val/potato/159.jpg  
  inflating: Dataset/val/potato/160.jpg  
  inflating: Dataset/val/potato/161.jpg  
  inflating: Dataset/val/potato/162.jpg  
  inflating: Dataset/val/potato/163.jpg  
  inflating: Dataset/val/potato/164.jpg  
  inflating: Dataset/val/potato/165.jpg  
  inflating: Dataset/val/potato/166.jpg  
  inflating: Dataset/val/potato/167.jpg  
  inflating: Dataset/val/potato/168.jpg  
  inflating: Dataset/val/potato/169.jpg  
  inflating: Dataset/val/weed/199.jpg  
  inflating: Dataset/val/weed/200.jpg  
  inflating: Dataset/val/weed/201.jpg  
  inflating: Dataset/val/weed/202.jpg  
  inflating: Dataset/val/weed/203.jpg  
  inflating: Dataset/val/weed/204.jpg  
  inflating: Dataset/val/weed/205.jpg  
  inflating: Dataset/val/weed/206.jpg  
  inflating: Dataset/val/weed/207.jpg  
  inflating: Dataset/val/weed/208.jpg  
  inflating: Dataset/val/weed/209.jpg  
  inflating: Dataset/val/weed/210.jpg  
  inflating: Dataset/val/weed/211.jpg  
  inflating: Dataset/val/weed/212.jpg  
  inflating: Dataset/val/weed/213.jpg  
  inflating: Dataset/val/weed/214.jpg  
  inflating: Dataset/val/weed/215.jpg  
  inflating: Dataset/val/weed/216.jpg  
  inflating: Dataset/val/weed/217.jpg  
  inflating: Dataset/val/weed/218.jpg  
  inflating: Dataset/val/weed/219.jpg  
  inflating: Dataset/val/weed/220.jpg  
  inflating: Dataset/val/weed/221.jpg  
  inflating: Dataset/val/weed/222.jpg  
  inflating: Dataset/val/weed/223.jpg  
  inflating: Dataset/val/weed/224.jpg  
  inflating: Dataset/val/weed/225.jpg  
  inflating: Dataset/val/weed/226.jpg  
  inflating: Dataset/val/weed/227.jpg  
  inflating: Dataset/val/weed/228.jpg  
  inflating: Dataset/val/weed/229.jpg  
  inflating: Dataset/val/weed/230.jpg  
  inflating: Dataset/val/weed/231.jpg  
  inflating: Dataset/val/weed/232.jpg  
  inflating: Dataset/val/weed/233.jpg  
  inflating: Dataset/val/weed/234.jpg  
  inflating: Dataset/val/weed/235.jpg  
  inflating: Dataset/val/weed/236.jpg  
  inflating: Dataset/val/weed/237.jpg  
  inflating: Dataset/val/weed/238.jpg  
  inflating: Dataset/val/weed/239.jpg  
  inflating: Dataset/val/weed/240.jpg  
  inflating: Dataset/val/weed/241.jpg  
  inflating: Dataset/val/weed/242.jpg  
In [3]:
train_path = os.path.join(os.getcwd(), 'Dataset/train')
os.listdir(train_path)
Out[3]:
['potato', 'weed']

Function to Display Images

In [4]:
# Get image as numpy array
def load_image(name, path):
    img_path = os.path.join(path, name)
    img = cv2.imread(img_path)
   # img = load_img(img_path, target_size = (256, 256))
    img = img[:,:, ::-1]
    return img

# Plot numpy array
def plot_image(img, name, title):    
    plt.imshow(img)
    plt.suptitle(title)

# Plot a grid of examples
def plot_grid(img_names, img_root,  title ,rows=2, cols=3):
    fig = plt.figure(figsize=(8,8))

    for i,name in enumerate(img_names):      
        fig.add_subplot(rows,cols,i+1)
        img = load_image(name, img_root)
        plt.axis("off")
        plot_image(img, name, title)

    plt.show()

Visualizing Potato Plant

In [5]:
potato_path = os.path.join(train_path, "potato")
potato_images = os.listdir(potato_path)
plot_grid(potato_images[-6:], potato_path, title = "Potato Images")

Visualizing Weed Plant

In [6]:
weed_path = os.path.join(train_path, "weed")
weed_images = os.listdir(weed_path)
plot_grid(weed_images[-6:], weed_path, title = "Weed Images")

Define the Training and Validation Directories

In [7]:
train_path = os.path.join(os.getcwd(), 'Dataset', 'train')
validation_path = os.path.join(os.getcwd(), 'Dataset', 'val')
os.listdir(train_path)
Out[7]:
['potato', 'weed']

Pre-process Images by Image Data Generator

Defining the training and validation image data generator to expand our dataset of training and validation images. This is done by artificially generating new images by processing the exsisting images. By random flipping, rotating, scaling, and perfroming similar operations, we can create new images.

In [8]:
train_image_generator = ImageDataGenerator(
    rescale = 1.0/255.0,
    rotation_range = 40,
    width_shift_range = 0.2,
    height_shift_range = 0.2, 
    shear_range = 0.2,
    zoom_range = 0.2,
    horizontal_flip = True
)
valid_image_generator = ImageDataGenerator(
rescale = 1.0/2555.0)
In [9]:
batch_size = 5

training_images = train_image_generator.flow_from_directory(train_path, 
                                                            target_size = (64,64),
                                                            class_mode = 'categorical', 
                                                            batch_size = batch_size,
                                                            classes = os.listdir(train_path),                                                           
                                                            shuffle = True
                                                            #color_mode="grayscale"
                                                           )
validation_images = train_image_generator.flow_from_directory(validation_path, 
                                                             target_size = (64,64), 
                                                             class_mode = 'categorical', 
                                                             batch_size = batch_size,
                                                             classes = os.listdir(train_path),                                                          
                                                             shuffle = True
                                                            #color_mode="grayscale"
                                                            )
Found 336 images belonging to 2 classes.
Found 75 images belonging to 2 classes.

Defining Model Architecture

In [10]:
# Defining the architecture for our neural network model
model=Sequential()



model.add(Conv2D(50,(2,2), activation='relu', input_shape=(64,64,3)))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(50,(2,2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(50,(2,2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(50,(2,2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())

model.add(Dense(150,activation='relu'))
model.add(Dense(100,activation='relu'))
model.add(Dense(2, activation='sigmoid'))
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 63, 63, 50)        650       
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 31, 31, 50)        0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 30, 30, 50)        10050     
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 15, 15, 50)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 14, 14, 50)        10050     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 7, 7, 50)          0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 6, 6, 50)          10050     
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 3, 3, 50)          0         
_________________________________________________________________
flatten (Flatten)            (None, 450)               0         
_________________________________________________________________
dense (Dense)                (None, 150)               67650     
_________________________________________________________________
dense_1 (Dense)              (None, 100)               15100     
_________________________________________________________________
dense_2 (Dense)              (None, 2)                 202       
=================================================================
Total params: 113,752
Trainable params: 113,752
Non-trainable params: 0
_________________________________________________________________
In [11]:
model.compile(loss="categorical_crossentropy",
              optimizer=Adam(learning_rate = 10 ** -4),
              metrics=['accuracy'])

Training the Model for 50 epochs

In [12]:
# Training the model
history = model.fit_generator(
    training_images,
    steps_per_epoch = training_images.samples // batch_size,
    validation_data = validation_images, 
    validation_steps = validation_images.samples // batch_size,
    epochs = 50,

)
WARNING:tensorflow:From <ipython-input-12-048a68b39cdd>:7: Model.fit_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
Please use Model.fit, which supports generators.
Epoch 1/50
67/67 [==============================] - 4s 54ms/step - loss: 0.6841 - accuracy: 0.5770 - val_loss: 0.6792 - val_accuracy: 0.5867
Epoch 2/50
67/67 [==============================] - 3s 50ms/step - loss: 0.6764 - accuracy: 0.5952 - val_loss: 0.6786 - val_accuracy: 0.5867
Epoch 3/50
67/67 [==============================] - 3s 50ms/step - loss: 0.6808 - accuracy: 0.5861 - val_loss: 0.6781 - val_accuracy: 0.5867
Epoch 4/50
67/67 [==============================] - 3s 50ms/step - loss: 0.6767 - accuracy: 0.5952 - val_loss: 0.6787 - val_accuracy: 0.5867
Epoch 5/50
67/67 [==============================] - 4s 52ms/step - loss: 0.6766 - accuracy: 0.5921 - val_loss: 0.6776 - val_accuracy: 0.5867
Epoch 6/50
67/67 [==============================] - 3s 50ms/step - loss: 0.6787 - accuracy: 0.5861 - val_loss: 0.6801 - val_accuracy: 0.5867
Epoch 7/50
67/67 [==============================] - 3s 50ms/step - loss: 0.6786 - accuracy: 0.5952 - val_loss: 0.6768 - val_accuracy: 0.5867
Epoch 8/50
67/67 [==============================] - 3s 50ms/step - loss: 0.6784 - accuracy: 0.5861 - val_loss: 0.6766 - val_accuracy: 0.5867
Epoch 9/50
67/67 [==============================] - 3s 51ms/step - loss: 0.6723 - accuracy: 0.5952 - val_loss: 0.6759 - val_accuracy: 0.5867
Epoch 10/50
67/67 [==============================] - 3s 51ms/step - loss: 0.6738 - accuracy: 0.5921 - val_loss: 0.6760 - val_accuracy: 0.5867
Epoch 11/50
67/67 [==============================] - 3s 50ms/step - loss: 0.6730 - accuracy: 0.5921 - val_loss: 0.6736 - val_accuracy: 0.5867
Epoch 12/50
67/67 [==============================] - 3s 51ms/step - loss: 0.6766 - accuracy: 0.5861 - val_loss: 0.6731 - val_accuracy: 0.5867
Epoch 13/50
67/67 [==============================] - 3s 50ms/step - loss: 0.6697 - accuracy: 0.5921 - val_loss: 0.6712 - val_accuracy: 0.5867
Epoch 14/50
67/67 [==============================] - 3s 50ms/step - loss: 0.6644 - accuracy: 0.5891 - val_loss: 0.6667 - val_accuracy: 0.5867
Epoch 15/50
67/67 [==============================] - 3s 50ms/step - loss: 0.6587 - accuracy: 0.5982 - val_loss: 0.6656 - val_accuracy: 0.5867
Epoch 16/50
67/67 [==============================] - 3s 51ms/step - loss: 0.6509 - accuracy: 0.5952 - val_loss: 0.6565 - val_accuracy: 0.5867
Epoch 17/50
67/67 [==============================] - 3s 50ms/step - loss: 0.6520 - accuracy: 0.6012 - val_loss: 0.6384 - val_accuracy: 0.6000
Epoch 18/50
67/67 [==============================] - 3s 51ms/step - loss: 0.6198 - accuracy: 0.6616 - val_loss: 0.6341 - val_accuracy: 0.6000
Epoch 19/50
67/67 [==============================] - 3s 50ms/step - loss: 0.5847 - accuracy: 0.6979 - val_loss: 0.6224 - val_accuracy: 0.6267
Epoch 20/50
67/67 [==============================] - 3s 50ms/step - loss: 0.5543 - accuracy: 0.7221 - val_loss: 0.5777 - val_accuracy: 0.7200
Epoch 21/50
67/67 [==============================] - 3s 50ms/step - loss: 0.4980 - accuracy: 0.7764 - val_loss: 0.5172 - val_accuracy: 0.7867
Epoch 22/50
67/67 [==============================] - 3s 50ms/step - loss: 0.4895 - accuracy: 0.7462 - val_loss: 0.4743 - val_accuracy: 0.8400
Epoch 23/50
67/67 [==============================] - 3s 51ms/step - loss: 0.4835 - accuracy: 0.7613 - val_loss: 0.4405 - val_accuracy: 0.8533
Epoch 24/50
67/67 [==============================] - 3s 51ms/step - loss: 0.4019 - accuracy: 0.8218 - val_loss: 0.4437 - val_accuracy: 0.7867
Epoch 25/50
67/67 [==============================] - 3s 51ms/step - loss: 0.4477 - accuracy: 0.7885 - val_loss: 0.4058 - val_accuracy: 0.8400
Epoch 26/50
67/67 [==============================] - 3s 50ms/step - loss: 0.3383 - accuracy: 0.8912 - val_loss: 0.3426 - val_accuracy: 0.9067
Epoch 27/50
67/67 [==============================] - 3s 51ms/step - loss: 0.3465 - accuracy: 0.8399 - val_loss: 0.3285 - val_accuracy: 0.9067
Epoch 28/50
67/67 [==============================] - 3s 50ms/step - loss: 0.2956 - accuracy: 0.8852 - val_loss: 0.4177 - val_accuracy: 0.7867
Epoch 29/50
67/67 [==============================] - 3s 50ms/step - loss: 0.2890 - accuracy: 0.8882 - val_loss: 0.2170 - val_accuracy: 0.9467
Epoch 30/50
67/67 [==============================] - 3s 50ms/step - loss: 0.2408 - accuracy: 0.9154 - val_loss: 0.2383 - val_accuracy: 0.9467
Epoch 31/50
67/67 [==============================] - 3s 50ms/step - loss: 0.2543 - accuracy: 0.9003 - val_loss: 0.4682 - val_accuracy: 0.7333
Epoch 32/50
67/67 [==============================] - 3s 51ms/step - loss: 0.2309 - accuracy: 0.9366 - val_loss: 0.1853 - val_accuracy: 0.9200
Epoch 33/50
67/67 [==============================] - 3s 50ms/step - loss: 0.2033 - accuracy: 0.9275 - val_loss: 0.1815 - val_accuracy: 0.9467
Epoch 34/50
67/67 [==============================] - 3s 51ms/step - loss: 0.2476 - accuracy: 0.9003 - val_loss: 0.1753 - val_accuracy: 0.9600
Epoch 35/50
67/67 [==============================] - 3s 50ms/step - loss: 0.1882 - accuracy: 0.9396 - val_loss: 0.1530 - val_accuracy: 0.9733
Epoch 36/50
67/67 [==============================] - 3s 51ms/step - loss: 0.1844 - accuracy: 0.9366 - val_loss: 0.1877 - val_accuracy: 0.9333
Epoch 37/50
67/67 [==============================] - 3s 51ms/step - loss: 0.1417 - accuracy: 0.9486 - val_loss: 0.1422 - val_accuracy: 0.9600
Epoch 38/50
67/67 [==============================] - 3s 51ms/step - loss: 0.1424 - accuracy: 0.9637 - val_loss: 0.0922 - val_accuracy: 0.9733
Epoch 39/50
67/67 [==============================] - 3s 51ms/step - loss: 0.1340 - accuracy: 0.9577 - val_loss: 0.0778 - val_accuracy: 0.9867
Epoch 40/50
67/67 [==============================] - 3s 51ms/step - loss: 0.1499 - accuracy: 0.9486 - val_loss: 0.1236 - val_accuracy: 0.9733
Epoch 41/50
67/67 [==============================] - 3s 51ms/step - loss: 0.1278 - accuracy: 0.9607 - val_loss: 0.0587 - val_accuracy: 1.0000
Epoch 42/50
67/67 [==============================] - 3s 50ms/step - loss: 0.1262 - accuracy: 0.9517 - val_loss: 0.0995 - val_accuracy: 0.9867
Epoch 43/50
67/67 [==============================] - 3s 50ms/step - loss: 0.1146 - accuracy: 0.9698 - val_loss: 0.0951 - val_accuracy: 0.9467
Epoch 44/50
67/67 [==============================] - 3s 51ms/step - loss: 0.1153 - accuracy: 0.9517 - val_loss: 0.0935 - val_accuracy: 0.9467
Epoch 45/50
67/67 [==============================] - 3s 49ms/step - loss: 0.1143 - accuracy: 0.9698 - val_loss: 0.0851 - val_accuracy: 0.9600
Epoch 46/50
67/67 [==============================] - 3s 48ms/step - loss: 0.1059 - accuracy: 0.9577 - val_loss: 0.0623 - val_accuracy: 0.9867
Epoch 47/50
67/67 [==============================] - 3s 47ms/step - loss: 0.1136 - accuracy: 0.9607 - val_loss: 0.0586 - val_accuracy: 0.9867
Epoch 48/50
67/67 [==============================] - 3s 47ms/step - loss: 0.1093 - accuracy: 0.9637 - val_loss: 0.0409 - val_accuracy: 1.0000
Epoch 49/50
67/67 [==============================] - 3s 48ms/step - loss: 0.1023 - accuracy: 0.9668 - val_loss: 0.0588 - val_accuracy: 0.9733
Epoch 50/50
67/67 [==============================] - 3s 47ms/step - loss: 0.1145 - accuracy: 0.9577 - val_loss: 0.0791 - val_accuracy: 0.9600
In [13]:
# Function to plot "accuracy vs epoch" graphs and "loss vs epoch" graphs for training and validation data
def plot_metrics(model_name, metric = 'accuracy'):
    if metric == 'loss':
        plt.title("Loss Values")
        plt.plot(model_name.history['loss'], label = 'train')
        plt.plot(model_name.history['val_loss'], label = 'test')
        plt.legend()
        plt.show()
    else:
        plt.title("Accuracy Values")
        plt.plot(model_name.history['accuracy'], label='train') 
        plt.plot(model_name.history['val_accuracy'], label='test') 
        plt.legend()
        plt.show()
In [14]:
plot_metrics(history, 'accuracy')
plot_metrics(history, 'loss')

Evaluating Model Performance

In [15]:
prediction_loss, prediction_accuracy = model.evaluate(validation_images)
print("Prediction Accuracy: ", prediction_accuracy)
15/15 [==============================] - 0s 20ms/step - loss: 0.1324 - accuracy: 0.9200
Prediction Accuracy:  0.9200000166893005

Model Predictions

In [16]:
def predict(img_path, model):
    image = load_img(img_path)
    image = image.resize((64,64))
    image = img_to_array(image)
    image = np.expand_dims(image, axis = 0)
    pred = model.predict(image)
    #print(pred) 
    pred = np.argmax(pred)
       
    if pred == 0:
        return 'Potato'
    elif pred == 1:
        return "Weed"
In [17]:
img_path = os.path.join (potato_path, random.choice(os.listdir(potato_path)))
#print(img_path)
prediction = predict(img_path, model)
pred = 'Prediction is ' + prediction
img = cv2.imread(img_path)
# Convert Image for BGR to RGB format
img = img[:,:, ::-1]
plt.imshow(img)
plt.grid(False)
plt.axis("off")
plt.title(pred)
plt.suptitle("Actual Plant : Potato")
Out[17]:
Text(0.5, 0.98, 'Actual Plant : Potato')
In [18]:
img_path = os.path.join (weed_path, random.choice(os.listdir(weed_path)))
#print(img_path)
prediction = predict(img_path, model)
pred = 'Prediction is ' + prediction
img = cv2.imread(img_path)
# Convert Image for BGR to RGB format
img = img[:,:, ::-1]
plt.imshow(img)
plt.grid(False)
plt.axis("off")
plt.title(pred)
plt.suptitle("Actual Plant : Weed")
Out[18]:
Text(0.5, 0.98, 'Actual Plant : Weed')
In [19]:
model.save('best_model.h5')

DeepCC

In [ ]:
!deepCC "best_model.h5"
[INFO]
Reading [keras model] 'best_model.h5'
In [ ]: