# sudocode.py (forked from AkashKobal/arecanut-diseases-detection)
# import the required modules
import tensorflow as tf
from tensorflow.keras import models, layers
import matplotlib.pyplot as plt

IMAGE_SIZE = 256
BATCH_SIZE = 32

# load the images from the class sub-folders of the training directory
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    "Arecanut_dataset/train/",
    shuffle=True,
    image_size=(IMAGE_SIZE, IMAGE_SIZE),  # 256x256 is also the function's default
    batch_size=BATCH_SIZE,
)
class_names = dataset.class_names  # the folder names are our class names
print(class_names)

# inspect one batch: the image tensor shape and the numeric labels
for image_batch, label_batch in dataset.take(1):
    print(image_batch.shape)
    print(label_batch.numpy())

print(len(dataset))  # number of batches in the dataset
print(dataset)

# look at the first image of a batch
for image_batch, label_batch in dataset.take(1):
    print(image_batch[0])          # the first image of the batch as a tensor
    print(image_batch[0].numpy())  # the tensor converted into a numpy array
    print(image_batch[0].shape)    # the shape of a single image
CHANNELS = 3

# visualize a single image from the dataset
for image_batch, label_batch in dataset.take(1):
    plt.imshow(image_batch[0].numpy().astype("uint8"))
    plt.axis("off")  # hide the x and y axes
plt.show()

# visualize a grid of 12 images with their class names as titles
plt.figure(figsize=(10, 10))
for image_batch, label_batch in dataset.take(1):
    for i in range(12):
        ax = plt.subplot(3, 4, i + 1)
        plt.imshow(image_batch[i].numpy().astype("uint8"))
        plt.title(class_names[label_batch[i]])  # title taken from the label index
        plt.axis("off")  # hide the x and y axes
plt.show()
# let's train for 50 epochs
EPOCHS = 50

train_size = 0.8
print(len(dataset) * train_size)  # number of batches that go into the training split

def get_dataset_partitions_tf(ds, train_split=0.8, val_split=0.1, test_split=0.1,
                              shuffle=True, shuffle_size=10000):
    """Split a batched tf.data.Dataset into train, validation and test partitions."""
    ds_size = len(ds)
    if shuffle:
        # a fixed seed makes the shuffle (and therefore the split) reproducible
        ds = ds.shuffle(shuffle_size, seed=12)
    train_size = int(train_split * ds_size)  # convert to an integer batch count
    val_size = int(val_split * ds_size)
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)   # skip the training batches, then take the validation batches
    test_ds = ds.skip(train_size).skip(val_size)  # the remaining batches form the test set
    return train_ds, val_ds, test_ds

train_ds, val_ds, test_ds = get_dataset_partitions_tf(dataset)
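# A quick sanity check of the split sizes (a minimal sketch; the exact batch
# counts depend on how many batches image_dataset_from_directory produced).
print("train batches:", len(train_ds))
print("validation batches:", len(val_ds))
print("test batches:", len(test_ds))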
# cache the images in memory after the first read from disk, shuffle them, and
# prefetch the next batch while the current one is being processed
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
# the images are already resized to IMAGE_SIZE at load time, so this pipeline
# only rescales the pixel values to the [0, 1] range
resizing_and_rescaling = tf.keras.Sequential([
    layers.experimental.preprocessing.Rescaling(1.0 / 255)
])

# random flips and rotations for data augmentation (one way to apply it to the
# training split is sketched just below)
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
    layers.experimental.preprocessing.RandomRotation(0.2),
])
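# A minimal sketch of how the augmentation pipeline could be wired into training,
# since it is defined above but not used elsewhere in this script.
# `augmented_train_ds` is a hypothetical name; the original train_ds is left unchanged.
augmented_train_ds = train_ds.map(
    lambda images, labels: (data_augmentation(images, training=True), labels),
    num_parallel_calls=tf.data.AUTOTUNE,
)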
# Model building: a simple CNN
input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS)
n_classes = 3  # we have three classes (one per folder)

model = models.Sequential([
    resizing_and_rescaling,  # rescale the pixel values first
    layers.Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax'),
])
model.build(input_shape=input_shape)
model.summary()
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['accuracy']
)

# batch_size is not passed here because train_ds is already a batched dataset
history = model.fit(
    train_ds,
    validation_data=val_ds,
    verbose=1,
    epochs=EPOCHS,
)

score = model.evaluate(test_ds)
print(score)  # [test loss, test accuracy]

print(history.params)             # training parameters
print(history.history.keys())     # metrics recorded per epoch
print(history.history['accuracy'])
print(len(history.history['accuracy']))
# visualize the training and validation accuracy/loss curves
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(range(len(acc)), acc, label='Training Accuracy')
plt.plot(range(len(val_acc)), val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')  # the accuracy curves should rise over the epochs

plt.subplot(1, 2, 2)
plt.plot(range(len(loss)), loss, label='Training Loss')
plt.plot(range(len(val_loss)), val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')  # the loss curves should fall over the epochs
plt.show()
import numpy as np

# run a prediction on the first image of one test batch
for images_batch, labels_batch in test_ds.take(1):
    first_image = images_batch[0].numpy().astype('uint8')
    first_label = labels_batch[0].numpy()

    print("first image to predict")
    plt.imshow(first_image)
    plt.show()
    print("actual label:", class_names[first_label])

    batch_prediction = model.predict(images_batch)
    print("predicted label:", class_names[np.argmax(batch_prediction[0])])
# predict the class and the confidence for a single image with the trained model
def predict(model, img):
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)  # create a batch of size 1

    predictions = model.predict(img_array)

    predicted_class = class_names[np.argmax(predictions[0])]
    confidence = round(100 * (np.max(predictions[0])), 2)
    return predicted_class, confidence
# show 9 test images with their actual and predicted classes
plt.figure(figsize=(15, 15))
for images, labels in test_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))

        predicted_class, confidence = predict(model, images[i].numpy())
        actual_class = class_names[labels[i]]

        plt.title(f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%")
        plt.axis("off")
plt.show()
# save the model, automatically assigning the next version number based on the
# numbered sub-folders already present in the models directory
import os

os.makedirs("../models", exist_ok=True)  # make sure the models directory exists
model_version = max([int(i) for i in os.listdir("../models") if i.isdigit()] + [0]) + 1
model.save(f"../models/{model_version}")
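# A minimal sketch of how the saved model could be reloaded for later inference,
# assuming the ../models/<version> layout written above; `reloaded` is just an
# illustrative name.
reloaded = tf.keras.models.load_model(f"../models/{model_version}")
print(reloaded.evaluate(test_ds))  # expected to match the score of the model saved above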