
I am writing a neural network model trained on both categorical and numerical features. What I did is apply an embedding to each categorical feature and then combine the results with the numerical features before feeding everything into the model.

import tensorflow as tf
from tensorflow import keras

def build_model2_three_hidden_layers():
    # Use Input layers, specify input shape (dimensions except first)
    inp_cat_data1 = keras.layers.Input(shape=(no_of_unique_cat(cat_data1),))
    inp_cat_data2 = keras.layers.Input(shape=(no_of_unique_cat(cat_data2),))
    inp_cat_data3 = keras.layers.Input(shape=(no_of_unique_cat(cat_data3),))
    inp_cat_data4 = keras.layers.Input(shape=(no_of_unique_cat(cat_data4),))
    inp_cat_data5 = keras.layers.Input(shape=(no_of_unique_cat(cat_data5),))
    inp_cat_data6 = keras.layers.Input(shape=(no_of_unique_cat(cat_data6),))
    inp_cat_data7 = keras.layers.Input(shape=(no_of_unique_cat(cat_data7),))
    inp_num_data = keras.layers.Input(shape=(num_data.shape[1],))

    # Bind each categorical input to its embedding layer
    emb1 = keras.layers.Embedding(input_dim=no_of_unique_cat(cat_data1), output_dim=embedding_size(cat_data1))(inp_cat_data1)
    emb2 = keras.layers.Embedding(input_dim=no_of_unique_cat(cat_data2), output_dim=embedding_size(cat_data2))(inp_cat_data2)
    emb3 = keras.layers.Embedding(input_dim=no_of_unique_cat(cat_data3), output_dim=embedding_size(cat_data3))(inp_cat_data3)
    emb4 = keras.layers.Embedding(input_dim=no_of_unique_cat(cat_data4), output_dim=embedding_size(cat_data4))(inp_cat_data4)
    emb5 = keras.layers.Embedding(input_dim=no_of_unique_cat(cat_data5), output_dim=embedding_size(cat_data5))(inp_cat_data5)
    emb6 = keras.layers.Embedding(input_dim=no_of_unique_cat(cat_data6), output_dim=embedding_size(cat_data6))(inp_cat_data6)
    emb7 = keras.layers.Embedding(input_dim=no_of_unique_cat(cat_data7), output_dim=embedding_size(cat_data7))(inp_cat_data7)
    # Flatten the embedded output, e.g. from shape (?, 3, 2) to (?, 6) -
    # otherwise it's not possible to concatenate it with inp_num_data
    flatten1 = keras.layers.Flatten()(emb1)
    flatten2 = keras.layers.Flatten()(emb2)
    flatten3 = keras.layers.Flatten()(emb3)
    flatten4 = keras.layers.Flatten()(emb4)
    flatten5 = keras.layers.Flatten()(emb5)
    flatten6 = keras.layers.Flatten()(emb6)
    flatten7 = keras.layers.Flatten()(emb7)
    # Concatenate the flattened embeddings with the numerical input
    conc = keras.layers.Concatenate()([flatten1, flatten2, flatten3, flatten4, flatten5, flatten6, flatten7, inp_num_data])
    dense1 = keras.layers.Dense(3, activation=tf.nn.relu)(conc)
    # Creating output layer
    out = keras.layers.Dense(1, activation=None)(dense1)
    model = keras.Model(inputs=[inp_cat_data1,
                                inp_cat_data2,
                                inp_cat_data3,
                                inp_cat_data4,
                                inp_cat_data5,
                                inp_cat_data6,
                                inp_cat_data7,
                                inp_num_data], outputs=out)
    return model
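
For context, no_of_unique_cat and embedding_size are small helper functions of mine; simplified sketches are below (they assume the cat_data* columns are pandas Series, and the embedding-size rule is just a common heuristic, so the exact numbers are assumptions):

# Simplified sketches of the helpers used above (assumption: cat_data* are pandas Series)
def no_of_unique_cat(cat_data):
    # number of distinct category codes in the column
    return int(cat_data.nunique())

def embedding_size(cat_data):
    # heuristic: half the number of categories, capped at 50
    return int(min(50, (no_of_unique_cat(cat_data) + 1) // 2))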

When I try to fit the model with my training dataset, I pass several inputs into it:

with tf.device('/CPU:0'):
    history = model2.fit(
        x=[cat_data1, cat_data2, cat_data3, cat_data4, cat_data5, cat_data6, cat_data7, num_data],
        y=train_labels,
        batch_size=batch_size,
        epochs=EPOCHS,
        verbose=1,
        shuffle=True,
        steps_per_epoch=int(train_dataset.shape[0] / batch_size),
        validation_data=([val_data1, val_data2, val_data3, val_data4, val_data5, val_data6, val_data7, val_num_data], valid_labels))
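
For completeness, model2 is the model returned by build_model2_three_hidden_layers(), compiled before calling fit. A minimal sketch of that step, assuming a single-output regression setup (the Adam optimizer and MSE loss here are assumptions, not necessarily exactly what I used):

model2 = build_model2_three_hidden_layers()
model2.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.001),  # assumed optimizer
    loss='mse',        # assumed loss for the single linear output
    metrics=['mae'])   # assumed metric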

However, when I attempt to train the model, it comes back with an error message: [error message screenshot]. I've searched on Stack Overflow but sadly nobody seems to have run into the same problem. Is there anything I can do to fix my code?

By the way, this is what my dataset looks like: [dataset screenshot]
