The code snippet below is a vanilla implementation of a TensorFlow model in which I am using a subclassed model and a custom fit function (implemented through train_step; a test_step would follow the same pattern). The code works fine in eager execution mode (the default mode of execution in TF 2.x) but fails in graph mode.

import numpy as np
import tensorflow as tf

class Encoder(tf.keras.Model):
    def __init__(self):
        super(Encoder, self).__init__(name='Encoder')
        self.input_layer    = tf.keras.layers.Dense(10)
        self.hidden_layer1  = tf.keras.layers.Dense(10)
        self.dropout_layer1 = tf.keras.layers.Dropout(0.2)
        self.hidden_layer2  = tf.keras.layers.Dense(10)
        self.dropout_layer2 = tf.keras.layers.Dropout(0.2)
        self.hidden_layer3  = tf.keras.layers.Dense(10)
        self.dropout_layer3 = tf.keras.layers.Dropout(0.2)
        self.output_layer   = tf.keras.layers.Dense(1)

    def call(self, input_data, training=False):
        fx = self.input_layer(input_data)
        fx = self.hidden_layer1(fx)
        if training:
            fx = self.dropout_layer1(fx)
        fx = self.hidden_layer2(fx)
        if training:
            fx = self.dropout_layer2(fx)
        fx = self.hidden_layer3(fx)
        if training:
            fx = self.dropout_layer3(fx)
        return self.output_layer(fx)
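# Aside: the `if training:` guards above are equivalent in effect to forwarding
# the flag to the Dropout layers themselves, e.g.
# `fx = self.dropout_layer1(fx, training=training)`, since Dropout is already
# a no-op at inference time. Both versions behave the same here.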

class CustomModelV1(tf.keras.Model):
    def __init__(self):
        super(CustomModelV1, self).__init__()
        self.encoder = Encoder()
    
    def train_step(self, data):
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        x, y = data

        with tf.GradientTape() as tape:
            y_pred = self.encoder(x, training=True)  # Forward pass
            # Compute the loss value
            # (the loss function is configured in `compile()`)
            loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)

        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(y, y_pred)
        
        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics} 
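    # Note: with eager execution left enabled (the TF2 default), fit() traces
    # this train_step into a tf.function, so it already runs as a graph unless
    # compile(run_eagerly=True) is requested.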

# Just use `fit` as usual.
# window() turns each dataset into a dataset of window datasets; flat_map()
# flattens those windows back into a single stream of elements.
x = tf.data.Dataset.from_tensor_slices(np.random.random((1000, 32)))

y_numpy = np.random.random((1000, 1))
y = tf.data.Dataset.from_tensor_slices(y_numpy)

x_window = x.window(30, shift=10, stride=1)
flat_x = x_window.flat_map(lambda t: t)
flat_x_scaled = flat_x.map(lambda t: t * 2)

y_window = y.window(30, shift=10, stride=1)
flat_y = y_window.flat_map(lambda t: t)
flat_y_scaled = flat_y.map(lambda t: t * 2)

z = (tf.data.Dataset.zip((flat_x_scaled, flat_y_scaled))
     .batch(32)
     .cache()
     .shuffle(buffer_size=32)
     .prefetch(buffer_size=tf.data.experimental.AUTOTUNE))
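# Optional sanity check: the pipeline should yield (features, targets) batches
# with shapes (None, 32) and (None, 1).
# print(z.element_spec)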


# Construct and compile an instance of CustomModel
model = CustomModelV1()
model.compile(optimizer="adam", loss="mse", metrics=["mae"])

model.fit(z, epochs=3)

The code works well in eager mode but throws the following error in graph mode. I disabled eager execution using tf.compat.v1.disable_eager_execution().

AttributeError                            Traceback (most recent call last)
<ipython-input-4-f7a5b420f08f> in <module>
     27 model.compile(optimizer="adam", loss="mse", metrics=["mae"])
     28 
---> 29 model.fit(z, epochs=3)

~\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training_v1.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
    793 
    794     func = self._select_training_loop(x)
--> 795     return func.fit(
    796         self,
    797         x=x,

~\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training_arrays_v1.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, **kwargs)
    617                                                      steps_per_epoch, x)
    618 
--> 619     x, y, sample_weights = model._standardize_user_data(
    620         x,
    621         y,

~\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training_v1.py in _standardize_user_data(self, x, y, sample_weight, class_weight, batch_size, check_steps, steps_name, steps, validation_split, shuffle, extract_tensors_from_dataset)
   2328     is_compile_called = False
   2329     if not self._is_compiled and self.optimizer:
-> 2330       self._compile_from_inputs(all_inputs, y_input, x, y)
   2331       is_compile_called = True
   2332 

~\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training_v1.py in _compile_from_inputs(self, all_inputs, target, orig_inputs, orig_target)
   2548       # We need to use `y` to set the model targets.
   2549       if training_utils_v1.has_tensors(target):
-> 2550         target = training_utils_v1.cast_if_floating_dtype_and_mismatch(
   2551             target, self.outputs)
   2552       training_utils_v1.validate_input_types(

~\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training_utils_v1.py in cast_if_floating_dtype_and_mismatch(targets, outputs)
   1377   if tensor_util.is_tf_type(targets):
   1378     # There is one target, so output[0] should be the only output.
-> 1379     return cast_single_tensor(targets, dtype=outputs[0].dtype)
   1380   new_targets = []
   1381   for target, out in zip(targets, outputs):

AttributeError: 'NoneType' object has no attribute 'dtype'
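
Note: the traceback shows that after tf.compat.v1.disable_eager_execution(), fit() is routed through the legacy v1 training loop (training_v1.py / training_arrays_v1.py), which predates the train_step API and never calls a custom train_step. If the goal is simply graph execution, a minimal sketch of the TF2-native way (assuming TF 2.x, where fit() traces train_step into a tf.function by default; run_eagerly=False is already the default and is shown only for emphasis):

# Leave eager execution enabled globally; Keras handles graph execution of
# train_step itself by tracing it into a tf.function.
model = CustomModelV1()
model.compile(optimizer="adam", loss="mse", metrics=["mae"],
              run_eagerly=False)  # default: train_step runs as a compiled graph
model.fit(z, epochs=3)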