In [1]:
# Import the required libraries
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.backend as K
from tensorflow.keras import layers
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Flatten, Activation
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import os
import cv2
import numpy as np
import pickle
import random
In [2]:
# Download and unpack the dataset, then delete the archive to save disk space.
# wget -N: only re-download if the remote file is newer than the local copy.
# unzip -qo: quiet mode, overwrite existing files without prompting.
!wget -N "https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/Grapes-Leaf-Disease-data.zip"
!unzip -qo Grapes-Leaf-Disease-data.zip
!rm Grapes-Leaf-Disease-data.zip
In [3]:
dirtrain = 'train'  # root of the training split (one subfolder per class)
In [4]:
dirtest = 'test'  # root of the test split (one subfolder per class)
In [5]:
# Class labels == subfolder names under train/ and test/; list index is the integer label.
categories= ["Black_rot","Esca_(Black_Measles)","Healthy","Leaf_blight_(Isariopsis_Leaf_Spot)"]
In [6]:
# Report how many images each class folder of the training split contains.
data_dir = 'train'
print("Number of samples")
for entry in os.listdir(data_dir + '/'):
    subdir = data_dir + '/' + entry
    if os.path.isdir(subdir):
        print(entry, " : ", len(os.listdir(subdir + '/')))
In [7]:
# Report how many images each class folder of the test split contains.
data_dir = 'test'
print("Number of samples")
for entry in os.listdir(data_dir + '/'):
    subdir = data_dir + '/' + entry
    if os.path.isdir(subdir):
        print(entry, " : ", len(os.listdir(subdir + '/')))
In [8]:
# Preview a single sample: the first file of the first category.
# Both breaks fire after one image, so exactly one figure is shown.
for category in categories:
    class_dir = os.path.join(dirtrain, category)
    for fname in os.listdir(class_dir):
        sample = cv2.imread(os.path.join(class_dir, fname))
        print(sample.shape)
        plt.imshow(sample)
        plt.show()
        break
    break
In [9]:
training_data = []

def create_training_data():
    """Load every training image into the global `training_data` list.

    Appends [image_array, class_index] pairs (class_index is the position of
    the category in `categories`) and returns a list with the number of files
    seen per category, in the same order as `categories`.
    """
    counts = []
    for category in categories:
        path = os.path.join(dirtrain, category)
        class_num = categories.index(category)
        n_files = 0
        for fname in os.listdir(path):
            n_files += 1
            img = cv2.imread(os.path.join(path, fname))
            # cv2.imread does not raise on a bad file -- it returns None, so
            # the old try/except never caught anything and None entries could
            # end up in training_data. Check explicitly instead.
            if img is not None:
                training_data.append([img, class_num])
        counts.append(n_files)
    return counts

count_train = create_training_data()
In [10]:
testing_data = []

def create_testing_data():
    """Load every test image into the global `testing_data` list.

    Appends [image_array, class_index] pairs and returns per-category file
    counts in the same order as `categories`.
    """
    counts = []
    for category in categories:
        path = os.path.join(dirtest, category)
        class_num = categories.index(category)
        n_files = 0
        for fname in os.listdir(path):
            n_files += 1
            img = cv2.imread(os.path.join(path, fname))
            # cv2.imread returns None on failure rather than raising, so the
            # old try/except was dead code; skip unreadable files explicitly.
            if img is not None:
                testing_data.append([img, class_num])
        counts.append(n_files)
    return counts

count_test = create_testing_data()
In [11]:
# Sanity check: total samples loaded vs per-category file counts.
print(len(training_data))
print(count_train)
print(len(testing_data))
print(count_test)
In [12]:
# Shuffle so classes are interleaved before the train/validation split.
# NOTE(review): no random.seed is set, so the shuffle -- and therefore the
# validation split below -- is not reproducible across runs.
random.shuffle(training_data)
random.shuffle(testing_data)
In [13]:
# Containers for the features (images) and labels of each split.
x_train = []
y_train = []
x_test = []
y_test = []
In [14]:
# Split the shuffled (image, label) pairs into parallel feature/label lists,
# then stack the images into a single (N, 256, 256, 3) array.
x_train.extend(img for img, _ in training_data)
y_train.extend(lbl for _, lbl in training_data)
# assumes every image is already 256x256x3 -- TODO confirm dataset is pre-sized
x_train = np.array(x_train).reshape(-1, 256, 256, 3)
In [15]:
# Sanity-check: display the first training image.
x = cv2.resize(training_data[0][0], (256, 256))
# cv2 loads images as BGR; convert so matplotlib's RGB display shows true
# colors. (The previous cmap='gray' was a no-op: cmap is ignored for
# 3-channel images.)
plt.imshow(cv2.cvtColor(x, cv2.COLOR_BGR2RGB))
Out[15]:
In [16]:
# Same split for the test data: parallel feature/label lists, then one array.
x_test.extend(img for img, _ in testing_data)
y_test.extend(lbl for _, lbl in testing_data)
x_test = np.array(x_test).reshape(-1, 256, 256, 3)
In [17]:
def save_training_data(x_train, y_train):
    """Pickle the training arrays to x_train.pickle / y_train.pickle.

    Uses context managers so the files are always closed. (The original
    version's final `pickle_out.close` was missing its parentheses, so the
    y_train file handle was never actually closed.)
    """
    with open("x_train.pickle", "wb") as handle:
        pickle.dump(x_train, handle)
    with open("y_train.pickle", "wb") as handle:
        pickle.dump(y_train, handle)

save_training_data(x_train, y_train)
In [18]:
def save_testing_data(x_test, y_test):
    """Pickle the test arrays to x_test.pickle / y_test.pickle.

    Context managers guarantee the files are closed even if pickle.dump
    raises partway through.
    """
    with open("x_test.pickle", "wb") as handle:
        pickle.dump(x_test, handle)
    with open("y_test.pickle", "wb") as handle:
        pickle.dump(y_test, handle)

save_testing_data(x_test, y_test)
In [19]:
def load_data():
    """Load and return the pickled training features from x_train.pickle.

    The original opened the file without ever closing it; a context manager
    releases the handle as soon as the load finishes.
    """
    with open("x_train.pickle", "rb") as handle:
        return pickle.load(handle)
In [20]:
x_train.shape  # expect (num_train_samples, 256, 256, 3)
Out[20]:
Build and train the model
In [21]:
# CNN: two conv blocks with aggressive 8x8 pooling, then a small dense head.
K.clear_session()
model = Sequential()
model.add(layers.Conv2D(32, (3, 3), padding='same', input_shape=(256, 256, 3), activation='relu'))
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPool2D(pool_size=(8, 8)))
model.add(layers.Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPool2D(pool_size=(8, 8)))
# The Activation('relu') that used to sit here was a no-op: the pooled values
# come from a ReLU conv layer, so they are already non-negative and
# relu(relu(x)) == relu(x). Removed without changing the computed function.
model.add(Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(4, activation='softmax'))  # one output per category
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
In [22]:
model.summary()  # layer shapes and parameter counts
In [23]:
# One-hot encode the integer labels (4 classes) for categorical_crossentropy.
y_train_cat = to_categorical(y_train,4)
y_test_cat = to_categorical(y_test,4)
In [24]:
# Train for 10 epochs; 15% of the training data is held out for validation.
history = model.fit(x_train, y_train_cat, batch_size=32, epochs=10, verbose=1, validation_split=0.15, shuffle=True)
In [31]:
# save the model (legacy HDF5 format), then reload it to verify the file round-trips
model.save("leaf_disease_model.h5")
new_model = tf.keras.models.load_model("leaf_disease_model.h5")
In [32]:
# loss and accuracy of the restored model on the held-out test split
loss, acc = new_model.evaluate(x_test,y_test_cat, verbose=2)
print('Restored model, accuracy: {:5.2f}%'.format(100*acc))
In [33]:
# Training loss vs validation loss per epoch.
# (Removed the duplicate 'from matplotlib import pyplot as plt' -- plt is
# already imported in the notebook's import cell; mid-notebook imports break
# a clean Restart & Run All narrative.)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
In [34]:
# Training accuracy vs validation accuracy per epoch.
# (Removed the duplicate 'from matplotlib import pyplot as plt' -- plt is
# already imported in the notebook's import cell.)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('acc')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
Predictions
In [35]:
img = cv2.resize(x_test[0], (256, 256))
# Display only: convert BGR -> RGB so matplotlib shows true colors.
# `img` itself stays BGR, matching the preprocessing the model was trained on.
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
Out[35]:
In [36]:
# Add the batch dimension the model expects: (1, 256, 256, 3).
img = img.reshape(-1,256,256,3)
In [37]:
# Sequential.predict_classes was removed in TF 2.6; taking the argmax of the
# softmax probabilities from predict() is the documented replacement.
predict_class = np.argmax(new_model.predict(img), axis=-1)
predict_class
Out[37]:
In [38]:
categories[predict_class[0]]  # map the predicted class index back to its label
Out[38]:
In [39]:
img0 = cv2.resize(x_test[199], (256, 256))
# Display only: BGR -> RGB so matplotlib shows true colors; `img0` stays BGR
# to match the model's training preprocessing.
plt.imshow(cv2.cvtColor(img0, cv2.COLOR_BGR2RGB))
Out[39]:
In [40]:
# Add the batch dimension the model expects: (1, 256, 256, 3).
img0 = img0.reshape(-1,256,256,3)
In [41]:
# predict_classes was removed in TF 2.6; argmax over predict() probabilities
# is the equivalent.
predict_class = np.argmax(new_model.predict(img0), axis=-1)
predict_class
Out[41]:
In [42]:
categories[predict_class[0]]  # map the predicted class index back to its label
Out[42]:
deepCC
In [43]:
# Compile the saved Keras model with deepCC (Cainvas toolchain) for edge deployment.
!deepCC leaf_disease_model.h5
In [ ]: