
Hello guys, I am trying to use a pretrained VGG16 in Keras, but it keeps giving me this error:

ValueError: Error when checking target: expected activation_1 to have shape (2622,) but got array with shape (1,)

I was trying to create the model based on this poster: Link

Also, I took the pre-trained weights from here. The weights can be read here.

This is my code:

from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, ZeroPadding2D
from keras import backend as K


# dimensions of our images.
img_width, img_height = 224, 224

train_data_dir = 'database/train'
validation_data_dir = 'database/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16

if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

# build the VGG16 network
model = applications.VGG16(weights='imagenet', include_top=False)
print('VGG Pretrained Model loaded.')

model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(Conv2D(4096, (7, 7), activation='relu'))
model.add(Dropout(0.5))
model.add(Conv2D(4096, (1, 1), activation='relu'))
model.add(Dropout(0.5))
model.add(Conv2D(2622, (1, 1)))
model.add(Flatten())
model.add(Activation('softmax'))

# model.load_weights('./vgg16_face_weights.h5')
#
# vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)

model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 224,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 224)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)

model.save_weights('first_try.h5')
flyingduck92

1 Answer


You probably have only one sub-folder inside 'database/train' and 'database/validation'.

Please make sure there are 2622 sub-folders (one per class) in each of those two directories so that Keras can generate the labels correctly.

The following example shows that the labels should have shape (batch_size, 2622).

# the above remains the same
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

import numpy as np

classes = 2622
batch_size = 4
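# one-hot labels of shape (batch_size, 2622): each row has a single 1 at a random class index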
y = np.zeros((batch_size, classes))
for i in range(batch_size):
    y[i, np.random.choice(classes)] = 1

model.fit(x=np.random.random((batch_size,)+input_shape), y=y, batch_size=batch_size)

model.save_weights('first_try.h5')
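For completeness, here is a minimal sketch (my addition, not part of the original answer) of the generator side once the class sub-folders are in place. It assumes the database/train layout from the question and uses class_mode='categorical', which is what produces one-hot labels of shape (batch_size, num_classes):

# sketch: check the label shape that the directory generator produces
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255)
gen = datagen.flow_from_directory(
    'database/train',               # expects one sub-folder per class
    target_size=(224, 224),
    batch_size=4,
    class_mode='categorical')       # one-hot labels, width = number of class folders

x_batch, y_batch = next(gen)
print(y_batch.shape)                # should be (4, 2622) if there are 2622 class folders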

EDIT:

To change the last Conv2D layer from 2622 filters to 12 filters while maintaining the loaded weights, here is a workaround:

# define model and load_weights
# ......

# build a new model on top of the previous one
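# model.layers[-4] is the Dropout right before the 2622-filter Conv2D, so only the final classifier is replaced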
conv = Conv2D(12, (1, 1))(model.layers[-4].output)
flatten = Flatten()(conv)
softmax = Activation('softmax')(flatten)
final_model = Model(inputs=model.input, outputs=softmax)
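As a follow-up sketch (again my addition, with stated assumptions): final_model can then be compiled and trained on 12-class generators. Freezing the copied VGG layers is an assumption here; drop it if you want to fine-tune them.

# sketch only: train the new 12-way head; freezing the loaded layers is an assumption
for layer in model.layers:
    layer.trainable = False          # keep the loaded VGG-Face weights fixed

final_model.compile(loss='categorical_crossentropy',
                    optimizer='rmsprop',
                    metrics=['accuracy'])

final_model.fit_generator(
    train_generator,                 # assumed rebuilt with class_mode='categorical' and 12 class folders
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)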

Ref: Cannot add layers to saved Keras Model. 'Model' object has no attribute 'add'

keineahnung2345
  • I am trying to make it recognise only 12 people (12 folders). But the VGG-16 pretrained model uses 2622 faces. I would like to use that weight file (which covers 2622 faces) in my model. How can I do that? – flyingduck92 Jan 14 '19 at 23:36