Saturday, November 18, 2017

Improved version of the Keras dogs vs. cats example

I improved it a little by using the Adam optimizer and the SELU activation. I am not sure whether it is actually better, but it seems to work better. The original version of dogs vs. cats is here.
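For reference, the change is concentrated in the dense head and the compile call. Below is a minimal sketch of the idea, assuming the original used plain ReLU activations with standard Dropout; the 512-feature input size is hypothetical and only for illustration. The full script follows.

from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers.noise import AlphaDropout

model = Sequential()
# hypothetical input size of 512 features, just for illustration
model.add(Dense(64, input_shape=(512,), kernel_initializer='lecun_normal'))
model.add(Activation('selu'))       # selu instead of relu
model.add(AlphaDropout(0.1))        # AlphaDropout instead of plain Dropout
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])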


from __future__ import print_function

import os.path

import numpy as np
import keras
from keras.models import Sequential, model_from_json
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Flatten, Dense
from keras.layers.noise import AlphaDropout
from keras.preprocessing.image import ImageDataGenerator

# Output locations for TensorBoard logs, the model definition and the weights
f_log = './log'
f_model = './model/dogvscat'
model_yaml = 'dogvscat_model.yaml'
model_filename = 'dogvscat_model.json'
weights_filename = 'dogvscat_model_weights.hdf5'

batch_size = 200
epochs = 2
nb_validation_samples = 25000  # used to derive the number of batches per epoch and for validation

print('Building model...')


# Reuse a previously saved architecture and weights if they exist
if os.path.isfile(os.path.join(f_model, model_filename)):
    print('Saved parameters found. I will use this file...')
    json_string = open(os.path.join(f_model, model_filename)).read()
    model = model_from_json(json_string)
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.load_weights(os.path.join(f_model,weights_filename))
else:
    print('Saved parameters not found. Creating a new model...')
    # Two Conv/MaxPool blocks followed by a SELU dense head
    model = Sequential()
    model.add(Conv2D(32, (3, 3), input_shape=(128, 128, 3)))
    model.add(Activation('selu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('selu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    # lecun_normal initialization and AlphaDropout go with the selu activation
    # to preserve its self-normalizing property
    model.add(Dense(64, kernel_initializer='lecun_normal'))
    model.add(Activation('selu'))
    model.add(AlphaDropout(0.1))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

# Augment the training images; the validation images are only rescaled
train_datagen = ImageDataGenerator(
    rescale=1.0 / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1.0 / 255)

train_generator = train_datagen.flow_from_directory(
    'data/train',
    target_size=(128, 128),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)

validation_generator = test_datagen.flow_from_directory(
    'data/validation',
    target_size=(128, 128),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)

# TensorBoard logging and best-model checkpointing on validation loss
tb_cb = keras.callbacks.TensorBoard(log_dir=f_log, histogram_freq=0)
cp_cb = keras.callbacks.ModelCheckpoint(
    filepath=os.path.join(f_model, weights_filename),
    monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
cbks = [tb_cb, cp_cb]

history = model.fit_generator(
    train_generator,
    # 25,000 images / 200 per batch = 125 steps per epoch
    steps_per_epoch=int(np.ceil(nb_validation_samples / batch_size)),
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=int(np.ceil(nb_validation_samples / batch_size)),
    callbacks=cbks
    )

# evaluate_generator expects a number of steps (batches), not a number of samples
score = model.evaluate_generator(
    validation_generator,
    steps=int(np.ceil(nb_validation_samples / batch_size)))

print('')
print('Validation loss:', score[0])
print('Validation accuracy:', score[1])

# Save the architecture (JSON and YAML) and the final weights
json_string = model.to_json()
with open(os.path.join(f_model, model_filename), 'w') as f:
    f.write(json_string)
yaml_string = model.to_yaml()
with open(os.path.join(f_model, model_yaml), 'w') as f:
    f.write(yaml_string)
print('save weights')
model.save_weights(os.path.join(f_model, weights_filename))
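
As a quick sanity check of the saved files, something like the following reloads the JSON architecture and the checkpointed weights and classifies a single image. The image path is a hypothetical example, and the class order follows train_generator.class_indices.

from keras.models import model_from_json
from keras.preprocessing.image import load_img, img_to_array
import numpy as np

model = model_from_json(open('./model/dogvscat/dogvscat_model.json').read())
model.load_weights('./model/dogvscat/dogvscat_model_weights.hdf5')

# hypothetical example image; rescale exactly as in the generators
img = img_to_array(load_img('data/validation/cats/cat.0.jpg', target_size=(128, 128))) / 255.0
probs = model.predict(np.expand_dims(img, axis=0))[0]
print(probs)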