0

I am trying to make a Cgan and I want to do an interpolation between the categories, but I get the following error

"Input 0 of layer "conv2d" is incompatible with the layer: expected axis -1 of input shape to have value 1, but received input with shape (128, 28, 28, 11)"

can someone help me fix it.

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Activation, BatchNormalization, Dense, Dropout, Flatten, Reshape
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import Conv2D, Conv2DTranspose
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import tensorflow as tf

import warnings , os
from tensorflow.keras.layers import (Activation, BatchNormalization, Concatenate, Dense,Embedding, Flatten, Input, Multiply, Reshape)
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import Conv2D, Conv2DTranspose
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam
from keras.models import load_model
from tensorflow.keras import activations
from tensorflow import keras


#Dimensiones de entrada del modelo
img_rows = 28
img_cols = 28
channels = 1

# Input image dimensions
img_shape = (img_rows, img_cols, channels)
z_dim = 100                                     #Tamaño del vector de ruido, utilizado como entrada al Generador

batch_size = 64
num_channels = 1
num_classes = 10
image_size = 28
latent_dim = 128
img_rows=28
img_cols=28


generator_in_channels = latent_dim + num_classes
discriminator_in_channels = num_channels + num_classes
print(generator_in_channels, discriminator_in_channels)

# Usaremos todos los ejemplos disponibles tanto del entrenamiento como de la prueba conjuntos.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test]) # une las matrices
all_labels = np.concatenate([y_train, y_test])

# Escale los valores de píxeles al rango [0, 1], agregue una dimensión de canal a
# las imágenes y la codificación one-hot de las etiquetas.
all_digits = all_digits.astype("float32") / 127.5-1.0
All_digits  = np.expand_dims(all_digits, axis=3)
All_digits = np.expand_dims(All_digits, axis=3)
all_digits = np.reshape(all_digits, (-1, 28, 28, 1)) #tal cual remodela o reacomoda
all_labels = keras.utils.to_categorical(all_labels, 10) #Convierte un vector de clase (enteros) en una matriz de clase binaria.



# Crear tf.data.Dataset.
dataset = tf.data.Dataset.from_tensor_slices((all_digits, all_labels))
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size) # este es llamado en cond_gan.fit
print(dataset)

print(f"Shape of training images: {all_digits.shape}")
print(f"Shape of training labels: {all_labels.shape}")


#DCGAN Generator
def build_generator(generator_in_channels):

    model = Sequential() #modelo es apropiado para una simple pila de capas donde cada capa tiene exactamente un tensor de entrada y un tensor de salida
    model.add(Dense(256 * 7 * 7, input_dim=generator_in_channels))
    model.add(Reshape((7, 7, 256)))
    model.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.01))
    model.add(Conv2DTranspose(64, kernel_size=3, strides=1, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.01))
    model.add(Conv2DTranspose(1, kernel_size=3, strides=2, padding='same'))
    model.add(Activation('tanh')) 

    return model
#DCGAN Discriminator
def build_discriminator(img_shape):
    model = Sequential()
    #model.add(keras.Input(shape=(28, 28, discriminator_in_channels)))
    model.add(Conv2D(32,kernel_size=3,strides=2,padding='same'))
    model.add(LeakyReLU(alpha=0.01))
    model.add(Conv2D(64,kernel_size=3,strides=2,padding='same'))
    model.add(LeakyReLU(alpha=0.01))
    model.add(Conv2D(128,kernel_size=3,strides=2,padding='same'))
    model.add(LeakyReLU(alpha=0.01))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    return model

def build_gan(generator, discriminator):
    model = Sequential()

    # Genearator -> Discriminator
    model.add(generator)
    model.add(discriminator)

    return model


# Creación y compilación de modelos discriminadores
discriminator = build_discriminator(img_shape)
discriminator.compile(loss='binary_crossentropy',optimizer=Adam(), metrics=['accuracy'])
# Preserve los parámetros del discriminador mientras entrena al generador
discriminator.trainable = False

# Crear un modelo generador
generator = build_generator(generator_in_channels)

# Cree y compile un modelo GAN con discriminadores congelados para entrenar al generador
gan = build_gan(generator, discriminator)
gan.compile(loss='binary_crossentropy', optimizer=Adam())


from operator import ge
losses = []
accuracies = []
iteration_checkpoints = []

def train(data,iterations, batch_size, sample_interval):
    # Cargar conjunto de datos MNIST
    #data = data.window(2)
    #windows = next(iter(data))
    #real_images, one_hot_labels=windows
    #print(real_images)
    #print(type(real_images))
    for iteration in range(iterations):    
        iterator = iter(data)

        # get x and y
        real_images, one_hot_labels = iterator.get_next()
        real_images=tf.constant(real_images)
      

        # Add dummy dimensions to the labels so that they can be concatenated with
         # the images. This is for the discriminator.
        image_one_hot_labels = one_hot_labels[:, :, None, None]
        image_one_hot_labels = tf.repeat( image_one_hot_labels, repeats=[image_size * image_size])
        image_one_hot_labels = tf.reshape( image_one_hot_labels, (-1, image_size, image_size, num_classes))

       # Etiqueta de imagen real: All 1
        real = np.ones((batch_size, 1)) ##------------

        # Etiquetas de imágenes falsas: Todas 0
        fake = np.zeros((batch_size, 1)) ##------------

   
        # -------------------------
        #  Entrenamiento discriminatorio
        # -------------------------

        # This is for the generator.
        batch_size = tf.shape(real_images)[0]
        random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
        random_vector_labels = tf.concat( [random_latent_vectors, one_hot_labels], axis=1)
    
        # Decode the noise (guided by labels) to fake images.
        generated_images = generator(random_vector_labels)

        # Obtenga lotes aleatorios de imágenes reales
        idx = np.random.randint(0, All_digits.shape[0], batch_size) ##------------
        imgs = All_digits[idx] ##------------

        # Crear lotes de imágenes falsas
        z = np.random.normal(0, 1, (batch_size, generator_in_channels)) ##------------
        gen_imgs = generator.predict(z) ##------------
        
        # Combínalos con imágenes reales. Tenga en cuenta que estamos concatenando las etiquetas con estas imágenes aquí.
        fake_image_and_labels = tf.concat([gen_imgs, image_one_hot_labels], -1)
        real_image_and_labels = tf.concat([real_images, image_one_hot_labels], -1)
        combined_images = tf.concat([fake_image_and_labels, real_image_and_labels], axis=0 )


        # Training
         
        #d_loss_real = discriminator.train_on_batch(imgs, real) ##------------
        #d_loss_fake = discriminator.train_on_batch(fake_image_and_labels, fake) ##------------
        #print(d_loss_fake)
        #print(type(d_loss_fake))
        #d_loss, accuracy = 0.5 * np.add(d_loss_real, d_loss_fake) ##------------
        
        # Reúna etiquetas que distingan las imágenes reales de las falsas.
        labels = tf.concat( [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0 )


        #-------d_losss
        print('hola1')
        predictions = discriminator(combined_images)
        print('hola2')

        # ---------------------
        #  Training generator
        # ---------------------
        
        # Crear lotes de imágenes falsas
        
        z = np.random.normal(0, 1, (batch_size, generator_in_channels)) ##------------
        gen_imgs = generator.predict(z)##------------
        
        if iteration == 0: ##------------ 
            print("\nRandom noise input image") ##------------
            sample_images(generator)        ##------------     

        g_loss = gan.train_on_batch(z, real) ##------------

        if iteration < 200 and (iteration + 1) % 20 == 0: ##------------
            # Ahorre pérdida y precisión para trazar gráficos después del entrenamiento
            losses.append((d_loss, g_loss)) ##------------
            accuracies.append(100.0 * accuracy) ##------------
            iteration_checkpoints.append(iteration + 1) ##------------

            print("%d [D loss: %f, accuracy: %.2f%%] [G loss: %f]" % ##------------
                  (iteration + 1, d_loss, 100.0 * accuracy, g_loss)) ##------------

            # Salida de muestra de imagen generada
            sample_images(generator) ##------------
            
        elif (iteration + 1) % sample_interval == 0:##------------
            #Ahorre pérdida y precisión para trazar gráficos después del entrenamiento
            losses.append((d_loss, g_loss)) ##------------
            accuracies.append(100.0 * accuracy) ##------------
            iteration_checkpoints.append(iteration + 1) ##------------

            print("%d [D loss: %f, accuracy: %.2f%%] [G loss: %f]" %
                  (iteration + 1, d_loss, 100.0 * accuracy, g_loss)) ##------------

            # Salida de muestra de imagen generada
            sample_images(generator)##------------

def sample_images(generator, image_grid_rows=5, image_grid_columns=5):

    # muestreo de ruido aleatorio
    z = np.random.normal(0, 1, (image_grid_rows * image_grid_columns, generator_in_channels))
    
    # Generar imagen a partir de ruido aleatorio
    gen_imgs = generator.predict(z)
    
    # Escala los valores de píxeles de la imagen entre [0, 1]
    gen_imgs = 0.5 * gen_imgs + 0.5
    
    plt.figure(figsize=(6,4))
    sns.distplot(gen_imgs,color='blue',kde=True)
    plt.title("Generated Images Distribution")
    plt.show()

    fig, axs = plt.subplots(image_grid_rows,
                            image_grid_columns,
                            figsize=(7, 7),
                            sharey=True,
                            sharex=True)

    cnt = 0
    plt.figure(figsize=(9,9))
    for i in range(image_grid_rows):
        for j in range(image_grid_columns):
            axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
            axs[i, j].axis('off')
            cnt += 1
    plt.show()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#    axs[1, 1].imshow(gen_imgs[2, :, :, 0], cmap='gray')
#    axs[1, 1].axis('off')
#plt.show()

#Listing 4.8 Running the model
iterations = 20000 #Establece hiperparámetros
batch_size = 64
sample_interval = 1000 #Entrena el DCGAN para el número especificado de iteraciones
train(dataset,iterations, batch_size, sample_interval) 

I am not an expert, I am trying to join two codes to achieve my goal

0 Answers0