I am building a cascaded model (an autoencoder stacked with a classifier). The input to the autoencoder is a set of images, and the output of the autoencoder is fed into a pretrained classifier.
auto_input = Input(shape=(ch, height, width), name='x_autoen')
auto_output = autoencoder(auto_input)
auto_model = Model(input=auto_input, output=auto_output)
class_output = classifier(auto_output)
class_model = Model(input=auto_output, output=class_output)
cascade_model = Model(input=auto_input, output=[auto_output, class_output])
load_classifier_weights(cascade_model, classifier_weights_path)
auto_model.compile(optimizer='sgd', loss='mean_squared_error')
class_model.compile(optimizer='sgd', loss='binary_crossentropy')
cascade_model.compile(optimizer='sgd', loss='binary_crossentropy')
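For reference, the snippet above uses the Keras 1.x functional API with channel-first image ordering, and it assumes roughly the following setup; the concrete values for ch, height and width are placeholders matching the (1000, 1, 48, 48) training array shown further down:
# Assumed setup for the snippet above (Keras 1.x, channels-first ordering).
from keras.layers import Input
from keras.models import Model

# Placeholder shape values, inferred from the (1000, 1, 48, 48) array in the later error.
ch, height, width = 1, 48, 48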
But building the models this way returns the following error:
File "xxxx.py", line 33, in build_model
class_model = Model(input=auto_output, output=class_output)
File "/xxx/local/lib/python2.7/site-packages/keras/engine/topology.py", line 1987, in __init__
str(layers_with_complete_input))
RuntimeError: Graph disconnected: cannot obtain value for tensor x_autoen at layer "x_autoen". The following previous layers were accessed without issue: []
The classifier code:
def classifier(inputs):
    # encoder block 1
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    # encoder block 2
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    # bottleneck
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    # decoder: upsample and concatenate with conv2 along the channel axis
    up1 = merge([UpSampling2D(size=(2, 2))(conv3), conv2], mode='concat', concat_axis=1)  # 192x24x24
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)
    # decoder: upsample and concatenate with conv1 along the channel axis
    up2 = merge([UpSampling2D(size=(2, 2))(conv4), conv1], mode='concat', concat_axis=1)  # 96x48x48
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv5)
    # per-pixel softmax over 2 classes
    conv6 = Convolution2D(2, 1, 1, activation='relu', border_mode='same')(conv5)
    conv6 = core.Reshape((2, patch_height*patch_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    conv7 = core.Activation('softmax')(conv6)
    return conv7
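The layers above come from the Keras 1.x API; the imports assumed by this function are roughly the following (patch_height and patch_width are constants defined elsewhere in the script, the values below are placeholders):
# Keras 1.x imports assumed by classifier(); the shape constants are placeholders.
from keras.layers import Convolution2D, Dropout, MaxPooling2D, UpSampling2D, merge
from keras.layers import core

patch_height, patch_width = 48, 48  # placeholder values, matching 48x48 patches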
The error after making the corrections suggested in Daniel's comment:
ValueError: The model expects 2 input arrays, but only received one array. Found: array with shape (1000, 1, 48, 48)
This is the code I used for training the cascaded network:
cascade_model.fit(imgs_train, imgs_train, nb_epoch=epochs, batch_size=batch_size, verbose=2, shuffle=True, validation_split=0.1, callbacks=[checkpointer])
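Here, checkpointer is a Keras ModelCheckpoint callback, created along these lines (the filepath and options shown are placeholders, not my exact settings):
# Sketch of the checkpoint callback referenced in fit(); filepath/options are placeholders.
from keras.callbacks import ModelCheckpoint

checkpointer = ModelCheckpoint(filepath='weights.hdf5', verbose=1, save_best_only=True)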