# code for loading the format for the notebook
import os
# path : store the current path so we can change back to it later
path = os.getcwd()
os.chdir(os.path.join('..', 'notebook_format'))
from formats import load_style
load_style(plot_style=False)
os.chdir(path)
# 1. magic to print version
# 2. magic so that the notebook will reload external python modules
%load_ext watermark
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
from keras.datasets import mnist
from keras.utils import np_utils
from keras.optimizers import RMSprop
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Dropout, Activation
%watermark -a 'Ethen' -d -t -v -p numpy,pandas,keras
We'll use the basic Keras API to build a simple multi-layer neural network.
n_classes = 10
n_features = 784 # each mnist image is 28 * 28 pixels
# load the dataset and apply some standard preprocessing steps
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, n_features)
X_test = X_test.reshape(10000, n_features)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# pixel values range from 0 to 255; we can normalize them
# by dividing every value by 255
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices (one-hot encoding)
# note: you HAVE to do this step
Y_train = np_utils.to_categorical(y_train, n_classes)
Y_test = np_utils.to_categorical(y_test, n_classes)
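To see what this step does, here's a tiny example: each integer label becomes a row vector with a 1 at the label's index and 0 everywhere else.
# e.g. with 4 classes, each row below is a one-hot vector of length 4,
# with a 1 at index 0, 1 and 3 respectively
print(np_utils.to_categorical([0, 1, 3], 4))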
Basics of training a model:

- The easiest way to build models in Keras is to use the Sequential model and the .add() method to stack layers together in sequence to build up our network.
- We add Dense (fully-connected) layers, specifying how many nodes we wish to have for each layer. Since the first layer we add is the input layer, we have to make sure that its input_dim parameter matches the number of features (columns) in the training set; after the first layer, we don't need to specify the size of the input anymore.
- We then set the Activation function for each layer, and add a Dropout layer if we wish.
- For the last Dense and Activation layers, we need to specify the number of classes as the output size and use softmax so the model outputs each class's predicted probability.
# define the model
model = Sequential()
model.add(Dense(512, input_dim = n_features))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(n_classes))
model.add(Activation('softmax'))
# we can print the summary to check the number of parameters
model.summary()
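As a sanity check, we can reproduce the summary's parameter counts by hand: a Dense layer with n_inputs inputs and n_outputs nodes has n_inputs * n_outputs weights plus n_outputs bias terms (the Activation and Dropout layers contribute no parameters).
# parameters per Dense layer = n_inputs * n_outputs weights + n_outputs biases
print(784 * 512 + 512)  # first hidden layer: 401920
print(512 * 512 + 512)  # second hidden layer: 262656
print(512 * 10 + 10)    # output layer: 5130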
Once our model looks good, we can configure its learning process with .compile(), where we need to specify which optimizer to use, the loss function (categorical_crossentropy is the typical one for multi-class classification) and the metrics to track.

Finally, we .fit() the model by passing in the training set, the validation set, the number of epochs and the batch size. For the batch size, we typically specify a power of 2 for computing efficiency.
model.compile(loss = 'categorical_crossentropy', optimizer = RMSprop(), metrics = ['accuracy'])
n_epochs = 10
batch_size = 128
history = model.fit(
X_train,
Y_train,
batch_size = batch_size,
epochs = n_epochs,
    verbose = 1, # set it to 0 if we don't want progress bars
validation_data = (X_test, Y_test)
)
# history attribute stores the training and validation score and loss
history.history
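For instance, we can visualize the learning curves from this dictionary (a minimal sketch, assuming matplotlib is available):
# plot the training and validation loss per epoch
import matplotlib.pyplot as plt

plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()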
# .evaluate gives the loss and metric evaluation score for the dataset,
# here the result matches the validation set's history above
print('metrics: ', model.metrics_names)
score = model.evaluate(X_test, Y_test, verbose = 0)
score
# stores the weights of the model,
# it's a list of length 6 because we have 3 dense layers
# and each one has an associated bias term
weights = model.get_weights()
print(len(weights))
# W1 should have shape (784, 512): 784 for the
# feature columns and 512 for the number
# of dense nodes that we've specified
W1, b1, W2, b2, W3, b3 = weights
print(W1.shape)
print(b1.shape)
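As a further sanity check, a minimal numpy-only sketch can reproduce the model's predictions from these weights; dropout is only active during training, so the forward pass at inference time is just two relu layers followed by a softmax.
# manually reproduce the forward pass with the extracted weights,
# this should match model.predict since dropout is inactive at inference time
def relu(x):
    return np.maximum(0.0, x)

def softmax(x):
    # subtract the row-wise max for numerical stability
    e = np.exp(x - x.max(axis = 1, keepdims = True))
    return e / e.sum(axis = 1, keepdims = True)

hidden1 = relu(X_test @ W1 + b1)
hidden2 = relu(hidden1 @ W2 + b2)
proba = softmax(hidden2 @ W3 + b3)
print(np.allclose(proba, model.predict(X_test, verbose = 0), atol = 1e-4))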
# predict the class labels and compute the accuracy
y_pred = model.predict_classes(X_test, verbose = 0)
accuracy = np.sum(y_test == y_pred) / X_test.shape[0]
print('valid accuracy: %.2f' % (accuracy * 100))
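As an aside, predict_classes is just a convenience wrapper: we can obtain the same labels by taking the argmax of the predicted probabilities, which is worth knowing since some newer Keras versions no longer ship this method.
# equivalent to predict_classes: take the argmax of the predicted probabilities
y_proba = model.predict(X_test, verbose = 0)
print(np.array_equal(y_pred, y_proba.argmax(axis = 1)))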
It is not recommended to use pickle or cPickle to save a Keras model. Instead, by saving it as an HDF5 file, we can preserve both the configuration and the weights of the model.
model.save('my_model.h5') # creates an HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model
# identical to the previous one
model = load_model('my_model.h5')
# testing: predict the accuracy using the loaded model
y_pred = model.predict_classes(X_test, verbose = 0)
accuracy = np.sum(y_test == y_pred) / X_test.shape[0]
print('valid accuracy: %.2f' % (accuracy * 100))