Cats vs Dogs

Loading our images

  • Images are labeled catxxx.jpg and dogxxx.jpg
In [18]:
# Collect the names of all regular files in the image directory.
from os import listdir
from os.path import isfile, join

mypath = "./datasets/images/"

file_names = []
for entry in listdir(mypath):
    # Keep plain files only; skip any sub-directories.
    if isfile(join(mypath, entry)):
        file_names.append(entry)

print(str(len(file_names)) + ' images loaded')
3000 images loaded

Splitting our loaded images into a training and test/validation dataset

  • We also need to store their labels (i.e. y_train and y_test)
  • We re-size our images here to maintain a constant dimension of 150 x 150
  • We're going to use 1000 images of dogs and 1000 images of cats as our training data
  • For our test/validation dataset we're going to use 500 of each class
  • Dogs will be labeled 1 and cats 0
  • We store our new images in the following directories
    • /datasets/catsvsdogs/train/dogs
    • /datasets/catsvsdogs/train/cats
    • /datasets/catsvsdogs/validation/dogs
    • /datasets/catsvsdogs/validation/cats
In [19]:
import cv2
import numpy as np
import sys
import os
import shutil

# Extract 1000 for our training data and 500 for our validation set
# Takes about ~20 seconds to run
dog_count = 0                  # running count of dog images seen so far
cat_count = 0                  # running count of cat images seen so far
training_size = 1000           # images per class kept for training
test_size = 500                # images per class kept for validation
training_images = []           # resized training images (BGR arrays)
training_labels = []           # 1 = dog, 0 = cat
test_images = []               # resized validation images
test_labels = []               # 1 = dog, 0 = cat
size = 150                     # every image is resized to size x size
dog_dir_train = "./datasets/catsvsdogs/train/dogs/"
cat_dir_train = "./datasets/catsvsdogs/train/cats/"
dog_dir_val = "./datasets/catsvsdogs/validation/dogs/"
cat_dir_val = "./datasets/catsvsdogs/validation/cats/"

def make_dir(directory):
    """Create `directory`, wiping any existing directory at that path first.

    Guarantees the path exists and is empty after the call.
    """
    if os.path.exists(directory):
        shutil.rmtree(directory)
    os.makedirs(directory)

# Recreate the four output directories from scratch on every run so stale
# images from a previous extraction cannot leak into the new datasets.
make_dir(dog_dir_train)
make_dir(cat_dir_train)
make_dir(dog_dir_val)
make_dir(cat_dir_val)

def getZeros(number):
    """Return the zero prefix that pads `number` to three digits.

    e.g. 7 -> "00" (so "dog007"), 42 -> "0" ("dog042"), 123 -> "" ("dog123").

    Fix: the original tested `number > 10`, so number == 10 matched neither
    branch and fell through to "" — producing "dog10.jpg" instead of
    "dog010.jpg" and breaking the uniform three-digit naming.
    """
    if number < 10:
        return "00"
    if number < 100:
        return "0"
    return ""

def _save_example(image, count, label, prefix, train_dir, val_dir):
    """Route one resized image into the train or validation set.

    The first `training_size` images of a class go to training, the next
    `test_size` go to validation; anything beyond both quotas is dropped.
    Appends to the module-level image/label lists and writes a numbered
    JPEG (e.g. "dog007.jpg") into the matching directory.
    """
    if count <= training_size:
        training_images.append(image)
        training_labels.append(label)
        cv2.imwrite(train_dir + prefix + getZeros(count) + str(count) + ".jpg", image)
    elif count <= training_size + test_size:
        n = count - training_size  # restart numbering inside the validation set
        test_images.append(image)
        test_labels.append(label)
        cv2.imwrite(val_dir + prefix + getZeros(n) + str(n) + ".jpg", image)

for file in file_names:
    # Class is encoded in the filename: "dogxxx.jpg" / "catxxx.jpg".
    if file[0] not in ("d", "c"):
        continue

    image = cv2.imread(mypath + file)
    if image is None:
        # Unreadable/corrupt file: cv2.imread returns None — skip it
        # instead of crashing inside cv2.resize.
        continue
    image = cv2.resize(image, (size, size), interpolation=cv2.INTER_AREA)

    if file[0] == "d":
        dog_count += 1
        _save_example(image, dog_count, 1, "dog", dog_dir_train, dog_dir_val)
    else:
        cat_count += 1
        _save_example(image, cat_count, 0, "cat", cat_dir_train, cat_dir_val)

    # Stop early once both classes have filled their train + validation quotas.
    if dog_count == training_size + test_size and cat_count == training_size + test_size:
        break

print("Training and Test Data Extraction Complete")
Training and Test Data Extraction Complete

Let's save our datasets to NPZ files

In [20]:
# Persist the in-memory datasets as NPZ files using numpy's savez, so a
# later session can reload them without re-running the extraction step.
for filename, data in [
        ('cats_vs_dogs_training_data.npz', training_images),
        ('cats_vs_dogs_training_labels.npz', training_labels),
        ('cats_vs_dogs_test_data.npz', test_images),
        ('cats_vs_dogs_test_labels.npz', test_labels)]:
    np.savez(filename, np.array(data))
In [17]:
# Loader Function
import numpy as np

def load_data_training_and_test(datasetname):
    """Load the four NPZ archives previously saved for `datasetname`.

    Parameters
    ----------
    datasetname : str
        Filename prefix, e.g. "cats_vs_dogs" loads
        "cats_vs_dogs_training_data.npz", "..._training_labels.npz",
        "..._test_data.npz" and "..._test_labels.npz".

    Returns
    -------
    ((train, train_labels), (test, test_labels)) — four numpy arrays,
    each taken from the 'arr_0' entry of its archive.
    """
    def _load(suffix):
        # np.load on an .npz returns an NpzFile that keeps the file handle
        # open; the context manager closes it (the original leaked handles).
        with np.load(datasetname + suffix) as npzfile:
            return npzfile['arr_0']

    train = _load("_training_data.npz")
    train_labels = _load("_training_labels.npz")
    test = _load("_test_data.npz")
    test_labels = _load("_test_labels.npz")

    return (train, train_labels), (test, test_labels)

Let's view some of our loaded images

In [21]:
# Preview ten randomly chosen training images, printing each one's label.
for display_index in range(1, 11):
    sample = np.random.randint(0, len(training_images))
    cv2.imshow("image_" + str(display_index), training_images[sample])
    # Label convention from extraction: 0 = cat, 1 = dog.
    label = " - Cat" if training_labels[sample] == 0 else " - Dog"
    print(str(display_index) + label)
    cv2.waitKey(0)

cv2.destroyAllWindows()
1 - Cat
2 - Cat
3 - Dog
4 - Cat
5 - Cat
6 - Dog
7 - Cat
8 - Dog
9 - Dog
10 - Cat

Let's get our data ready in the format expected by Keras

  • We also stick to the previous naming convention
In [22]:
(x_train, y_train), (x_test, y_test) = load_data_training_and_test("cats_vs_dogs")

# Keras expects labels as column vectors: (2000,) -> (2000, 1), (1000,) -> (1000, 1).
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)

# Cast pixels to float32 and rescale from [0, 255] down to [0, 1].
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255

for array in (x_train, y_train, x_test, y_test):
    print(array.shape)
(2000, 150, 150, 3)
(2000, 1)
(1000, 150, 150, 3)
(1000, 1)

Let's create our model using a simple CNN that is similar to what we used for CIFAR10

  • Except now we use a Sigmoid instead of Softmax
  • Sigmoids are used when we're doing binary (i.e. two-class) classification
  • Note the binary_crossentropy loss
In [23]:
from __future__ import print_function
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import os

batch_size = 16
epochs = 25

# Derive the input dimensions from the first training image.
# Fix: img_cols previously read x_train[1].shape[0] — the *height* of the
# second image — which only worked because the images happen to be square.
img_rows = x_train[0].shape[0]
img_cols = x_train[0].shape[1]
input_shape = (img_rows, img_cols, 3)

# Simple 3-stage CNN: (Conv -> ReLU -> MaxPool) x 3, then a small dense
# head with dropout and a single sigmoid unit for binary classification.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

# binary_crossentropy pairs with the sigmoid output for two-class problems.
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

print(model.summary())
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_10 (Conv2D)           (None, 148, 148, 32)      896       
_________________________________________________________________
activation_16 (Activation)   (None, 148, 148, 32)      0         
_________________________________________________________________
max_pooling2d_10 (MaxPooling (None, 74, 74, 32)        0         
_________________________________________________________________
conv2d_11 (Conv2D)           (None, 72, 72, 32)        9248      
_________________________________________________________________
activation_17 (Activation)   (None, 72, 72, 32)        0         
_________________________________________________________________
max_pooling2d_11 (MaxPooling (None, 36, 36, 32)        0         
_________________________________________________________________
conv2d_12 (Conv2D)           (None, 34, 34, 64)        18496     
_________________________________________________________________
activation_18 (Activation)   (None, 34, 34, 64)        0         
_________________________________________________________________
max_pooling2d_12 (MaxPooling (None, 17, 17, 64)        0         
_________________________________________________________________
flatten_4 (Flatten)          (None, 18496)             0         
_________________________________________________________________
dense_7 (Dense)              (None, 64)                1183808   
_________________________________________________________________
activation_19 (Activation)   (None, 64)                0         
_________________________________________________________________
dropout_4 (Dropout)          (None, 64)                0         
_________________________________________________________________
dense_8 (Dense)              (None, 1)                 65        
_________________________________________________________________
activation_20 (Activation)   (None, 1)                 0         
=================================================================
Total params: 1,212,513
Trainable params: 1,212,513
Non-trainable params: 0
_________________________________________________________________
None

Training our model

In [12]:
# Train on the in-memory arrays; shuffle each epoch and track validation
# performance on the held-out test set.
history = model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test),
          shuffle=True)

# NOTE(review): hardcoded absolute path — this breaks on any other machine;
# consider a configurable output directory relative to the notebook.
model.save("/home/deeplearningcv/DeepLearningCV/Trained Models/cats_vs_dogs_V1.h5")

# Evaluate the performance of our trained model
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
Train on 2000 samples, validate on 1000 samples
Epoch 1/25
2000/2000 [==============================] - 54s 27ms/step - loss: 0.7265 - acc: 0.5370 - val_loss: 0.6787 - val_acc: 0.5560
Epoch 2/25
2000/2000 [==============================] - 48s 24ms/step - loss: 0.6771 - acc: 0.6130 - val_loss: 0.6390 - val_acc: 0.6190
Epoch 3/25
2000/2000 [==============================] - 48s 24ms/step - loss: 0.6238 - acc: 0.6685 - val_loss: 0.5971 - val_acc: 0.6990
Epoch 4/25
2000/2000 [==============================] - 47s 23ms/step - loss: 0.5637 - acc: 0.7245 - val_loss: 0.6073 - val_acc: 0.6460
Epoch 5/25
2000/2000 [==============================] - 47s 24ms/step - loss: 0.5267 - acc: 0.7630 - val_loss: 0.5333 - val_acc: 0.7400
Epoch 6/25
2000/2000 [==============================] - 49s 25ms/step - loss: 0.4472 - acc: 0.7990 - val_loss: 0.5195 - val_acc: 0.7400
Epoch 7/25
2000/2000 [==============================] - 48s 24ms/step - loss: 0.3900 - acc: 0.8260 - val_loss: 0.6277 - val_acc: 0.7130
Epoch 8/25
2000/2000 [==============================] - 48s 24ms/step - loss: 0.3340 - acc: 0.8605 - val_loss: 0.5841 - val_acc: 0.7460
Epoch 9/25
2000/2000 [==============================] - 48s 24ms/step - loss: 0.2733 - acc: 0.8855 - val_loss: 0.6216 - val_acc: 0.7340
Epoch 10/25
2000/2000 [==============================] - 49s 24ms/step - loss: 0.2395 - acc: 0.9035 - val_loss: 0.7391 - val_acc: 0.7370
Epoch 11/25
2000/2000 [==============================] - 49s 24ms/step - loss: 0.1922 - acc: 0.9260 - val_loss: 0.9434 - val_acc: 0.7220
Epoch 12/25
2000/2000 [==============================] - 48s 24ms/step - loss: 0.1434 - acc: 0.9465 - val_loss: 1.1205 - val_acc: 0.7360
Epoch 13/25
2000/2000 [==============================] - 50s 25ms/step - loss: 0.1150 - acc: 0.9565 - val_loss: 1.2319 - val_acc: 0.7030
Epoch 14/25
2000/2000 [==============================] - 50s 25ms/step - loss: 0.0959 - acc: 0.9645 - val_loss: 1.3856 - val_acc: 0.7340
Epoch 15/25
2000/2000 [==============================] - 49s 25ms/step - loss: 0.0959 - acc: 0.9680 - val_loss: 1.1597 - val_acc: 0.6950
Epoch 16/25
2000/2000 [==============================] - 49s 25ms/step - loss: 0.0923 - acc: 0.9670 - val_loss: 1.2969 - val_acc: 0.7210
Epoch 17/25
2000/2000 [==============================] - 49s 24ms/step - loss: 0.0769 - acc: 0.9715 - val_loss: 1.4416 - val_acc: 0.7360
Epoch 18/25
2000/2000 [==============================] - 57s 29ms/step - loss: 0.0703 - acc: 0.9780 - val_loss: 1.6973 - val_acc: 0.7290
Epoch 19/25
2000/2000 [==============================] - 54s 27ms/step - loss: 0.0497 - acc: 0.9830 - val_loss: 1.9353 - val_acc: 0.7250
Epoch 20/25
2000/2000 [==============================] - 58s 29ms/step - loss: 0.0723 - acc: 0.9760 - val_loss: 1.7130 - val_acc: 0.7330
Epoch 21/25
2000/2000 [==============================] - 56s 28ms/step - loss: 0.0627 - acc: 0.9785 - val_loss: 1.6741 - val_acc: 0.7280
Epoch 22/25
2000/2000 [==============================] - 54s 27ms/step - loss: 0.1039 - acc: 0.9685 - val_loss: 2.2343 - val_acc: 0.7030
Epoch 23/25
2000/2000 [==============================] - 52s 26ms/step - loss: 0.0669 - acc: 0.9780 - val_loss: 1.3467 - val_acc: 0.7270
Epoch 24/25
2000/2000 [==============================] - 53s 27ms/step - loss: 0.0613 - acc: 0.9750 - val_loss: 2.3629 - val_acc: 0.7280
Epoch 25/25
2000/2000 [==============================] - 50s 25ms/step - loss: 0.0552 - acc: 0.9785 - val_loss: 2.1670 - val_acc: 0.7110
1000/1000 [==============================] - 7s 7ms/step
Test loss: 2.166996139526367
Test accuracy: 0.711

Testing our Classifier

In [24]:
import cv2
import numpy as np
from keras.models import load_model

# NOTE(review): hardcoded absolute path — breaks on any other machine.
classifier = load_model('/home/deeplearningcv/DeepLearningCV/Trained Models/cats_vs_dogs_V1.h5')

def draw_test(name, pred, input_im):
    """Display `input_im` widened by a black border carrying the class name.

    `pred` is the stringified prediction ("[0]" = cat, "[1]" = dog).
    Fix: the border width is now taken from `input_im` itself rather than
    the global `imageL`, so the function no longer depends on loop state.
    """
    BLACK = [0, 0, 0]
    if pred == "[0]":
        pred = "cat"
    if pred == "[1]":
        pred = "dog"
    expanded_image = cv2.copyMakeBorder(input_im, 0, 0, 0, input_im.shape[0],
                                        cv2.BORDER_CONSTANT, value=BLACK)
    cv2.putText(expanded_image, str(pred), (252, 70),
                cv2.FONT_HERSHEY_COMPLEX_SMALL, 4, (0, 255, 0), 2)
    cv2.imshow(name, expanded_image)


# Show predictions for ten random test images.
for i in range(0, 10):
    rand = np.random.randint(0, len(x_test))
    input_im = x_test[rand]

    # 2x upscale purely for display purposes.
    imageL = cv2.resize(input_im, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
    cv2.imshow("Test Image", imageL)

    # Add the batch dimension expected by the model.
    input_im = input_im.reshape(1, 150, 150, 3)

    ## Get Prediction
    res = str(classifier.predict_classes(input_im, 1, verbose=0)[0])

    draw_test("Prediction", res, imageL)
    cv2.waitKey(0)

cv2.destroyAllWindows()

Analysis

  • Our results aren't bad, but they could be better

Now let's train our Cats vs Dogs Classifier using Data Augmentation

In [25]:
import os
import numpy as np
from keras.models import Sequential
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from keras import optimizers
# NOTE(review): scipy, pylab and matplotlib.cm are imported but not used
# in this notebook — candidates for removal.
import scipy
import pylab as pl
import matplotlib.cm as cm
%matplotlib inline

# Image geometry and training hyperparameters.
input_shape = (150, 150, 3)
img_width = 150
img_height = 150

nb_train_samples = 2000       # 1000 dogs + 1000 cats written during extraction
nb_validation_samples = 1000  # 500 dogs + 500 cats
batch_size = 16
epochs = 25

# Directories populated by the extraction cell above.
train_data_dir = './datasets/catsvsdogs/train'
validation_data_dir = './datasets/catsvsdogs/validation'

# Creating our data generator for our test data
validation_datagen = ImageDataGenerator(
    # used to rescale the pixel values from [0, 255] to [0, 1] interval
    rescale = 1./255)

# Creating our data generator for our training data
train_datagen = ImageDataGenerator(
      rescale = 1./255,              # normalize pixel values to [0,1]
      rotation_range = 30,           # randomly applies rotations
      width_shift_range = 0.3,       # randomly applies width shifting
      height_shift_range = 0.3,      # randomly applies height shifting
      horizontal_flip = True,        # randomly flips the image
      fill_mode = 'nearest')         # uses the fill mode nearest to fill gaps created by the above

# Specify criteria about our training data, such as the directory, image size, batch size and type 
# automagically retrieve images and their classes for train and validation sets
# (class labels come from the sub-directory names: cats/, dogs/)
train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size = (img_width, img_height),
        batch_size = batch_size,
        class_mode = 'binary',
        shuffle = True)

validation_generator = validation_datagen.flow_from_directory(
        validation_data_dir,
        target_size = (img_width, img_height),
        batch_size = batch_size,
        class_mode = 'binary',
        shuffle = False)    
Found 2000 images belonging to 2 classes.
Found 1000 images belonging to 2 classes.

Create our model, just like we did previously

In [5]:
# Build the same 3-stage CNN as before, this time passing the layer list
# directly to Sequential instead of repeated add() calls.
model = Sequential([
    Conv2D(32, (3, 3), input_shape=input_shape),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),

    Conv2D(32, (3, 3)),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),

    Conv2D(64, (3, 3)),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),

    Flatten(),
    Dense(64),
    Activation('relu'),
    Dropout(0.5),
    Dense(1),
    Activation('sigmoid'),
])

print(model.summary())

# Sigmoid output + binary_crossentropy for two-class classification.
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_4 (Conv2D)            (None, 148, 148, 32)      896       
_________________________________________________________________
activation_6 (Activation)    (None, 148, 148, 32)      0         
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 74, 74, 32)        0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 72, 72, 32)        9248      
_________________________________________________________________
activation_7 (Activation)    (None, 72, 72, 32)        0         
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 36, 36, 32)        0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 34, 34, 64)        18496     
_________________________________________________________________
activation_8 (Activation)    (None, 34, 34, 64)        0         
_________________________________________________________________
max_pooling2d_6 (MaxPooling2 (None, 17, 17, 64)        0         
_________________________________________________________________
flatten_2 (Flatten)          (None, 18496)             0         
_________________________________________________________________
dense_3 (Dense)              (None, 64)                1183808   
_________________________________________________________________
activation_9 (Activation)    (None, 64)                0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 64)                0         
_________________________________________________________________
dense_4 (Dense)              (None, 1)                 65        
_________________________________________________________________
activation_10 (Activation)   (None, 1)                 0         
=================================================================
Total params: 1,212,513
Trainable params: 1,212,513
Non-trainable params: 0
_________________________________________________________________
None
In [26]:
# Train from the augmenting generators; steps_per_epoch covers one full
# pass over the 2000 training images per epoch (125 batches of 16).
history = model.fit_generator(
    train_generator,
    steps_per_epoch = nb_train_samples // batch_size,
    epochs = epochs,
    validation_data = validation_generator,
    validation_steps = nb_validation_samples // batch_size)
Epoch 1/25
125/125 [==============================] - 76s 605ms/step - loss: 0.7353 - acc: 0.5215 - val_loss: 0.6896 - val_acc: 0.5212
Epoch 2/25
125/125 [==============================] - 67s 538ms/step - loss: 0.6934 - acc: 0.5505 - val_loss: 0.6767 - val_acc: 0.6057
Epoch 3/25
125/125 [==============================] - 61s 484ms/step - loss: 0.6884 - acc: 0.5665 - val_loss: 0.6812 - val_acc: 0.5152
Epoch 4/25
125/125 [==============================] - 66s 528ms/step - loss: 0.6871 - acc: 0.6070 - val_loss: 0.6295 - val_acc: 0.6667
Epoch 5/25
125/125 [==============================] - 75s 602ms/step - loss: 0.6636 - acc: 0.6025 - val_loss: 0.6360 - val_acc: 0.6362
Epoch 6/25
125/125 [==============================] - 66s 532ms/step - loss: 0.6502 - acc: 0.6290 - val_loss: 0.6081 - val_acc: 0.6636
Epoch 7/25
125/125 [==============================] - 82s 657ms/step - loss: 0.6456 - acc: 0.6320 - val_loss: 0.5880 - val_acc: 0.6890
Epoch 8/25
125/125 [==============================] - 61s 491ms/step - loss: 0.6263 - acc: 0.6430 - val_loss: 0.5426 - val_acc: 0.7185
Epoch 9/25
125/125 [==============================] - 52s 419ms/step - loss: 0.6673 - acc: 0.6590 - val_loss: 0.5660 - val_acc: 0.7043
Epoch 10/25
125/125 [==============================] - 52s 415ms/step - loss: 0.6310 - acc: 0.6475 - val_loss: 0.5528 - val_acc: 0.7154
Epoch 11/25
125/125 [==============================] - 53s 421ms/step - loss: 0.6321 - acc: 0.6670 - val_loss: 0.6197 - val_acc: 0.6809
Epoch 12/25
125/125 [==============================] - 50s 403ms/step - loss: 0.6176 - acc: 0.6740 - val_loss: 0.5395 - val_acc: 0.7307
Epoch 13/25
125/125 [==============================] - 50s 403ms/step - loss: 0.6205 - acc: 0.6720 - val_loss: 0.5686 - val_acc: 0.7022
Epoch 14/25
125/125 [==============================] - 50s 403ms/step - loss: 0.6107 - acc: 0.6695 - val_loss: 0.5949 - val_acc: 0.6850
Epoch 15/25
125/125 [==============================] - 51s 405ms/step - loss: 0.6243 - acc: 0.6805 - val_loss: 0.5391 - val_acc: 0.7348
Epoch 16/25
125/125 [==============================] - 50s 402ms/step - loss: 0.6218 - acc: 0.6680 - val_loss: 0.5603 - val_acc: 0.7226
Epoch 17/25
125/125 [==============================] - 50s 402ms/step - loss: 0.6181 - acc: 0.6815 - val_loss: 0.5870 - val_acc: 0.6463
Epoch 18/25
125/125 [==============================] - 52s 418ms/step - loss: 0.6097 - acc: 0.6935 - val_loss: 0.5511 - val_acc: 0.7378
Epoch 19/25
125/125 [==============================] - 70s 560ms/step - loss: 0.6117 - acc: 0.6840 - val_loss: 0.6247 - val_acc: 0.6972
Epoch 20/25
125/125 [==============================] - 78s 625ms/step - loss: 0.6123 - acc: 0.6830 - val_loss: 0.5259 - val_acc: 0.7530
Epoch 21/25
125/125 [==============================] - 77s 616ms/step - loss: 0.5895 - acc: 0.7020 - val_loss: 0.5110 - val_acc: 0.7490
Epoch 22/25
125/125 [==============================] - 79s 632ms/step - loss: 0.6088 - acc: 0.6865 - val_loss: 0.5251 - val_acc: 0.7307
Epoch 23/25
125/125 [==============================] - 77s 617ms/step - loss: 0.5969 - acc: 0.7005 - val_loss: 0.5204 - val_acc: 0.7398
Epoch 24/25
125/125 [==============================] - 76s 605ms/step - loss: 0.6232 - acc: 0.6885 - val_loss: 0.5314 - val_acc: 0.7449
Epoch 25/25
125/125 [==============================] - 76s 606ms/step - loss: 0.6042 - acc: 0.7030 - val_loss: 0.5229 - val_acc: 0.7581

Plotting our Loss and Accuracy Graphs

In [27]:
# Plot training vs validation loss per epoch.
import matplotlib.pyplot as plt

history_dict = history.history

loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)

# Draw both series with the same line weight but distinct markers.
for series, series_label, mark in ((val_loss_values, 'Validation/Test Loss', '+'),
                                   (loss_values, 'Training Loss', '4')):
    plt.plot(epochs, series, label=series_label,
             linewidth=2.0, marker=mark, markersize=10.0)

plt.xlabel('Epochs') 
plt.ylabel('Loss')
plt.grid(True)
plt.legend()
plt.show()
In [28]:
# Plotting our accuracy charts
import matplotlib.pyplot as plt

history_dict = history.history

acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
epochs = range(1, len(loss_values) + 1)

line1 = plt.plot(epochs, val_acc_values, label='Validation/Test Accuracy')
line2 = plt.plot(epochs, acc_values, label='Training Accuracy')
plt.setp(line1, linewidth=2.0, marker = '+', markersize=10.0)
plt.setp(line2, linewidth=2.0, marker = '4', markersize=10.0)
plt.xlabel('Epochs') 
plt.ylabel('Accuracy')
plt.grid(True)
plt.legend()
plt.show()