
I am trying to implement a DeepDream model using ResNet, and several problems appear. One error says that the object being unpacked is of type "NoneType", meaning it has a value of None; in Python you cannot unpack a "NoneType" object because it is not iterable. At other times the error says that the value being converted is of type "NoneType" and that this type is unsupported, which means TensorFlow cannot create an EagerTensor from a value of None.
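
For context, here is a small toy example of my own (not from the project code) of when tf.GradientTape.gradient returns None: as far as I understand, the loss has to stay connected to the watched tensor through TensorFlow ops, and anything that goes through .numpy() breaks that connection.

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])

with tf.GradientTape(persistent=True) as tape:
    tape.watch(x)  # x is a constant, so it is not watched automatically
    connected = tf.reduce_mean(x * x)                        # stays on the tape
    detached = tf.reduce_mean(tf.constant(x.numpy() ** 2))   # .numpy() leaves the tape

print(tape.gradient(connected, x))  # a real gradient tensor
print(tape.gradient(detached, x))   # None, which later breaks unpacking / tensor conversion
del tape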

This is a piece of code I tried to use to solve the error, but it failed. The full code is shown further below.

def deep_dream(input_image, model, steps=100, step_size=0.01):
    # Define the loss and the optimizer
    loss, intermediate_layer_model = calc_loss(input_image, model)
    optimizer = tf.optimizers.SGD(learning_rate=step_size)

    # Keep a list to hold the evolution of the image
    image_list = []

    # Run the optimization
    for i in range(steps):
        with tf.GradientTape() as tape:
            tape.watch(input_image)
            loss = calc_loss(input_image, model)[0]
        grads, = tape.gradient(loss, input_image)
        grads = tape.gradient(loss, input_image)
        if grads is None:
            return
        optimizer.apply_gradients([(grads, input_image)])
        image_list.append(input_image.numpy().copy())
        
    # Return the final image
    return input_image
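
For reference, this is the kind of gradient-ascent step I believe DeepDream needs (a sketch under my own assumptions: the image is held in a tf.Variable, and loss_fn is a placeholder for something like calc_loss that uses only TensorFlow ops, so the tape stays connected):

import tensorflow as tf

def dream_step(image_var, loss_fn, step_size=0.01):
    # image_var is a tf.Variable, so the tape tracks it automatically
    with tf.GradientTape() as tape:
        loss = loss_fn(image_var)               # must be TensorFlow ops only
    grads = tape.gradient(loss, image_var)      # single source -> single tensor (or None)
    grads /= tf.math.reduce_std(grads) + 1e-8   # normalize as in the code below
    image_var.assign(tf.clip_by_value(image_var + grads * step_size, 0.0, 255.0))
    return loss

And this is the full code: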
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing import image

# Load the ResNet50 model
#model = ResNet50(weights='imagenet')
model = keras.applications.ResNet50(weights='imagenet', include_top=False)
# Iterate over the layers in the ResNet50 model
for layer in model.layers:
  print(f'{layer.name}---> {layer.output_shape}')


import cv2

# Function to calculate the loss
def calc_loss(input_image, model):
    input_image_batch = tf.expand_dims(input_image, axis=0)
    preprocessed_input = preprocess_input(input_image_batch.numpy().copy())

    # Get the activations of a specific layer
    layer_name = "conv5_block2_1_bn"
    intermediate_layer_model = tf.keras.models.Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    intermediate_output = intermediate_layer_model(preprocessed_input)


    # Define the loss
    loss = tf.math.reduce_mean(intermediate_output)
    return loss, intermediate_layer_model





def deep_dream(input_image, model, steps=100, step_size=0.01):
    for i in range(steps):
        with tf.GradientTape() as tape:
            # Compute the loss
            loss, model_ = calc_loss(input_image, model)

        # Get the gradients of the input image with respect to the loss
        grads = tape.gradient(loss, input_image)

        # Normalize the gradients
        grads /= tf.math.reduce_std(grads) + 1e-8

        # Update the input image
        input_image += grads * step_size
        input_image = tf.clip_by_value(input_image, 0, 255)

    return input_image


# Load an image
#img_path = '/content/drive/MyDrive/Baghdad images/market.png'
#img = cv2.imread(img_path)
#img = cv2.resize(img, (224, 224))
#img = np.array(img, dtype=float)

# Preprocess the image
#original_image = np.copy(img)
#img = preprocess_input(np.expand_dims(img, axis=0))



img = cv2.resize(cv2.imread('/content/drive/MyDrive/Baghdad images/market.png'), (224, 224))

#img = image.load_img(img_path, target_size=(224, 224))
img = image.img_to_array(img)

# Preprocess the image
original_image = np.copy(img)
#img = preprocess_input(np.expand_dims(img, axis=0))

#input_image.set_shape([1,224,224,3])

#input_image = tf.constant(img, dtype=tf.float32)
#input_image = tf.expand_dims(input_image, axis=0)
#input_image = tf.squeeze(input_image, axis=0)

# Convert the image to a Tensor
input_image = tf.constant(img, dtype=tf.float32)

# Run the deep dream algorithm
dream_img = deep_dream(input_image, model)

# Deprocess the image
dream_img = tf.clip_by_value(dream_img[0], 0, 255).numpy().astype('uint8')

# Plot the original and dream images
plt.figure(figsize=(10,10))
plt.subplot(121)
plt.imshow(original_image.astype('uint8'))
plt.axis('off')
plt.title('Original Image')

plt.subplot(122)
plt.imshow(dream_img)
plt.axis('off')
plt.title('Dream Image')
plt.show()
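
I also considered a version of calc_loss that stays entirely in TensorFlow ops (no .numpy() round trip), since I assume that conversion is what disconnects the gradient tape, but I have not verified this; it also assumes preprocess_input from tensorflow.keras.applications.resnet50 accepts tensors as well as arrays:

import tensorflow as tf
from tensorflow.keras.applications.resnet50 import preprocess_input

def calc_loss_tf(input_image, model, layer_name="conv5_block2_1_bn"):
    # Every step here is a TensorFlow op, so the tape can trace it
    input_image_batch = tf.expand_dims(input_image, axis=0)
    preprocessed_input = preprocess_input(input_image_batch)
    intermediate_layer_model = tf.keras.models.Model(
        inputs=model.input, outputs=model.get_layer(layer_name).output)
    intermediate_output = intermediate_layer_model(preprocessed_input)
    return tf.math.reduce_mean(intermediate_output)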

I am trying to build a DeepDream model with ResNet to generate a DeepDream image, but the result is an error.

This is the first error:

TypeError                                 Traceback (most recent call last)
<ipython-input-95-b14eebcfe038> in <module>
      1 # Run the deep dream algorithm
----> 2 dream_img = deep_dream(input_image, model)

<ipython-input-92-27c78a6e0618> in deep_dream(input_image, model, steps, step_size)
     12             tape.watch(input_image)
     13             loss = calc_loss(input_image, model)[0]
---> 14         grads, = tape.gradient(loss, input_image)
     15         grads = tape.gradient(loss, input_image)
     16         if grads is None:

TypeError: cannot unpack non-iterable NoneType object
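
My understanding of this TypeError is that tape.gradient with a single source returns a single tensor (or None), not a tuple, so the trailing comma in grads, = ... tries to unpack whatever comes back, and unpacking None fails. A toy example of the same behaviour:

import tensorflow as tf

x = tf.Variable([1.0, 2.0])

with tf.GradientTape() as tape:
    loss = tf.reduce_sum(x * x)

grads_list = tape.gradient(loss, [x])  # a list of sources gives a list of gradients
grads, = grads_list                    # unpacking a one-element list is fine

# grads, = None   # raises: TypeError: cannot unpack non-iterable NoneType object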

And this is the second error:

ValueError                                Traceback (most recent call last)
<ipython-input-48-09c4ef33f56c> in <module>
     72 
     73 # Run the deep dream algorithm
---> 74 dream_img = deep_dream(input_image, model)
     75 
     76 # Deprocess the image

16 frames
/usr/local/lib/python3.8/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
     96       dtype = dtypes.as_dtype(dtype).as_datatype_enum
     97   ctx.ensure_initialized()
---> 98   return ops.EagerTensor(value, ctx.device_name, dtype)
     99 
    100 

ValueError: Attempt to convert a value (None) with an unsupported type (<class 'NoneType'>) to a Tensor.
  • The problem seems to be that the gradients are None, but this is not a reproducible example that people can debug for you. – Dr. Snoopy Feb 11 '23 at 19:55

0 Answers