
I'm using sample code from the Keras blog (with a few tweaks), but when I run it, my model's loss and accuracy metrics aren't improving.

I'm not sure if I'm implementing some function incorrectly.

I'm loading images from a saved HDF5 file (via h5py) in small batches.
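For context, load_batches comes from a small helper module; a minimal sketch of what such a generator might look like is below (the 'images' and 'labels' dataset names are assumptions, not necessarily the real schema):

import h5py

def load_batches(batch_size, hdf5_path, classes=12):
    # Sketch of an HDF5 batch generator; 'images' and 'labels' are
    # placeholder dataset names. 'classes' is kept to match the call site.
    f = h5py.File(hdf5_path, 'r')
    images = f['images']   # e.g. shape (N, 299, 299, 3), uint8
    labels = f['labels']   # e.g. shape (N, classes), one-hot encoded
    n = images.shape[0]
    start = 0
    while True:
        stop = min(start + batch_size, n)
        yield images[start:stop], labels[start:stop]
        start = stop % n   # wrap back to the beginning of the file

Each yield returns a (batch_size, 299, 299, 3) image array and the matching one-hot label array.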

import numpy as np
import matplotlib.pyplot as plt

from keras.layers import Conv2D, MaxPooling2D, Input, Flatten, Dense
from keras.models import Model
import keras

# model layers

input_img = Input(shape=(299, 299, 3))

tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
tower_1 = Conv2D(64, (3, 3), padding='same', activation='relu')(tower_1)

tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
tower_2 = Conv2D(64, (5, 5), padding='same', activation='relu')(tower_2)

tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input_img)
tower_3 = Conv2D(64, (1, 1), padding='same', activation='relu')(tower_3)

concatenated_layer = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=3)
conv1 = Conv2D(3, (3, 3), padding='same', activation='relu')(concatenated_layer)
flatten = Flatten()(conv1)
dense_1 = Dense(500, activation='relu')(flatten)
predictions = Dense(12, activation='softmax')(dense_1)


# initialize and compile the model

model = Model(inputs=input_img, outputs=predictions)
sgd = keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)

model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])



# load images

import loading_hdf5_files

hdf5_path = r'C:\Users\Moondra\Desktop\Keras Applications\training.hdf5'

for epoch in range(10):
    # create a fresh generator for each pass over the data
    batches = loading_hdf5_files.load_batches(8, hdf5_path, classes=12)

    for step in range(15):
        x, y = next(batches)
        #plt.imshow(x[0])
        #plt.show()
        x = (x / 255).astype('float32')  # normalize once; float32 to save memory
        data = model.train_on_batch(x, y)
        print('loss : {:.5},  accuracy :  {:.2%}'.format(*data))

My output:

These are the last 50 or so steps, but there has been no change since the first step:

loss : 2.4226,  accuracy :  100.00%
loss : 2.4122,  accuracy :  100.00%
loss : 2.542,  accuracy :  0.00%
loss : 2.4793,  accuracy :  0.00%
loss : 2.4934,  accuracy :  0.00%
loss : 2.5132,  accuracy :  0.00%
loss : 2.4949,  accuracy :  0.00%
loss : 2.472,  accuracy :  0.00%
loss : 2.4616,  accuracy :  0.00%
loss : 2.4865,  accuracy :  0.00%
loss : 2.5585,  accuracy :  0.00%
loss : 2.4406,  accuracy :  0.00%
loss : 2.4882,  accuracy :  0.00%
loss : 2.4311,  accuracy :  0.00%
loss : 2.4895,  accuracy :  0.00%
loss : 2.502,  accuracy :  0.00%
loss : 2.4913,  accuracy :  0.00%
loss : 2.4585,  accuracy :  0.00%
loss : 2.4846,  accuracy :  0.00%
loss : 2.5143,  accuracy :  0.00%
loss : 2.4505,  accuracy :  0.00%
loss : 2.5574,  accuracy :  0.00%
loss : 2.5458,  accuracy :  0.00%
loss : 2.4311,  accuracy :  0.00%
loss : 2.4963,  accuracy :  0.00%
loss : 2.4212,  accuracy :  100.00%
loss : 2.4896,  accuracy :  0.00%
loss : 2.4824,  accuracy :  0.00%
loss : 2.4886,  accuracy :  0.00%
loss : 2.5135,  accuracy :  0.00%
loss : 2.4156,  accuracy :  100.00%
loss : 2.511,  accuracy :  0.00%
loss : 2.484,  accuracy :  0.00%
loss : 2.4965,  accuracy :  0.00%
loss : 2.5457,  accuracy :  0.00%
loss : 2.5343,  accuracy :  0.00%
loss : 2.5185,  accuracy :  0.00%
loss : 2.4902,  accuracy :  0.00%
loss : 2.4137,  accuracy :  100.00%
loss : 2.5271,  accuracy :  0.00%
loss : 2.5111,  accuracy :  0.00%
loss : 2.5014,  accuracy :  0.00%
loss : 2.4908,  accuracy :  0.00%
loss : 2.4904,  accuracy :  0.00%
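
Worth noting: the loss hovers around ln(12) ≈ 2.48, which is exactly the categorical cross-entropy of a uniform prediction over 12 classes, so the model still appears to be guessing uniformly:

import numpy as np
print(np.log(12))   # ~2.4849, the plateau seen in the loss above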
  • First of all, your accuracy is always either 100% or 0%. Is that average loss for an epoch? – Evan Weissburg Nov 20 '17 at 21:40
  • Yeah, average loss for the batch (not the epoch). As for the accuracy, I'm using the built-in metrics, which I assume report the average accuracy for the batch. – Moondra Nov 20 '17 at 21:43

1 Answer


Training for a longer period of time seems to fix the problem.
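
For reference, a minimal sketch of the longer run (the epoch count below is illustrative, not the exact value used):

# continues from the model and generator defined in the question
epochs = 100  # illustrative; the point is simply to train for much longer
for epoch in range(epochs):
    batches = loading_hdf5_files.load_batches(8, hdf5_path, classes=12)
    for step in range(15):
        x, y = next(batches)
        x = (x / 255).astype('float32')
        loss, acc = model.train_on_batch(x, y)
    print('epoch {}: loss {:.5f}, accuracy {:.2%}'.format(epoch, loss, acc))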

– Moondra