I have a stacked model built from five EfficientNetB3 models and I am trying to get predictions out of it. The dataset and model-building code are below:

Dataset:

import os
import tensorflow as tf


def build_decoder(with_labels=True, target_size=(300, 300), ext='jpg'):
    def decode(path):
        file_bytes = tf.io.read_file(path)
        if ext == 'png':
            img = tf.image.decode_png(file_bytes, channels=3)
        elif ext in ['jpg', 'jpeg']:
            img = tf.image.decode_jpeg(file_bytes, channels=3)
        else:
            raise ValueError("Image extension not supported")

        img = tf.cast(img, tf.float32) / 255.0
        img = tf.image.resize(img, target_size)

        return img

    def decode_with_labels(path, label):
        return decode(path), label

    return decode_with_labels if with_labels else decode


def build_dataset(paths, labels=None, bsize=32, cache=True, decode_fn=None,
                  augment_fn=None, augment=True, repeat=True, shuffle=1024,
                  cache_dir=""):
    if cache_dir != "" and cache is True:
        os.makedirs(cache_dir, exist_ok=True)

    if decode_fn is None:
        decode_fn = build_decoder(labels is not None)

    if augment_fn is None:
        augment_fn = build_augmenter(labels is not None)

    AUTO = tf.data.experimental.AUTOTUNE
    slices = paths if labels is None else (paths, labels)

    dset = tf.data.Dataset.from_tensor_slices(slices)
    dset = dset.map(decode_fn, num_parallel_calls=AUTO)
    dset = dset.cache(cache_dir) if cache else dset
    dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
    dset = dset.repeat() if repeat else dset
    dset = dset.shuffle(shuffle) if shuffle else dset
    dset = dset.batch(bsize).prefetch(AUTO)

    return dset


test_decoder = build_decoder(with_labels=False, target_size=(300, 300), ext='png')
dtest = build_dataset(test_paths, bsize=2, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder)
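
(`build_augmenter`, which `build_dataset` calls above, is not shown here and the exact augmentations should not matter for this question; a minimal sketch of that kind of function, assuming only random flips, would look like this:)

def build_augmenter(with_labels=True):
    # Assumed example: simple random flips; the real augmentations may differ.
    def augment(img):
        img = tf.image.random_flip_left_right(img)
        img = tf.image.random_flip_up_down(img)
        return img

    def augment_with_labels(img, label):
        return augment(img), label

    return augment_with_labels if with_labels else augment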

Stacked model:

with strategy.scope():
    models = load_all_models()

    for i, model in enumerate(models):
        model.layers[1]._name = f'effnet_layer{i}'
        for layer in model.layers:
            layer._name = layer.name + f"_{i}"
            layer.trainable = False

    ensemble_visible = [model.input for model in models]
    ensemble_outputs = [model.output for model in models]
    merge = tf.keras.layers.concatenate(ensemble_outputs)
    merge = tf.keras.layers.Dense(10, activation='relu')(merge)
    output = tf.keras.layers.Dense(n_labels, activation='softmax')(merge)
    model = tf.keras.models.Model(inputs=ensemble_visible, outputs=output)

    model.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss='categorical_crossentropy',
                  metrics=[tf.keras.metrics.AUC(multi_label=True)])
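
(`load_all_models` loads the five trained EfficientNetB3 models from disk; a rough sketch, with placeholder file paths, is:)

def load_all_models():
    # Placeholder paths; the real files are the five trained EfficientNetB3 models.
    model_paths = [f'effnetb3_model_{i}.h5' for i in range(5)]
    return [tf.keras.models.load_model(path) for path in model_paths]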

I am trying to make predictions using the following code segment:

X = tf.data.Dataset.zip((dtest, dtest, dtest, dtest, dtest))
X_pred = []
for image in X.take(-1):
    X_pred.append(image)

sub_df[label_cols] = stack_model.predict(X_pred, verbose=1)
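
(For reference, each element of the zipped dataset `X` should be a tuple of five image batches; a quick way to check its structure is:)

for batch in X.take(1):
    # Each element yielded by the zip is a tuple of 5 batches of shape (bsize, 300, 300, 3).
    print(len(batch), [b.shape for b in batch])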

But I am getting the following error:

ValueError: Layer model expects 5 input(s), but it received 3035 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:1' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:2' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:3' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:4' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:5' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:6' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:7' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:8' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:9' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:10' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:11' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:12' shape=(None, 300, 300, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:13' shape=(None, 300, 300, 3) dtype=float32>,....... and so on

How can I solve this error? For reference, the test set has 1214 images, so with `bsize=2` the zipped dataset yields 607 batches, and 607 × 5 = 3035, which matches the number of input tensors reported in the error. Thanks in advance.

A good way to debug this is to print `model.summary()`. This shows the structure of the network, along with the dimensions of each layer, including the first one. Also, what does `ensemble_visible = [model.input for model in models]` contain? – Francesco Sep 16 '21 at 11:02
