# Define the maximum sequence
lengthsmax_input_len = 1000max_target_len = 100
# Define the input and output shapes
encoder_inputs = Input(shape=(max_input_len,))decoder_inputs = Input(shape=(max_target_len,))
# Define the embedding layer
embedding = Embedding(input_dim=len(tokenizer.word_index)+1, output_dim=256)
# Define the LSTM layer in the encoder
encoder_lstm = LSTM(256, return_sequences=True, return_state=True)
# Define the LSTM layer in the decoder
decoder_lstm = LSTM(256, return_sequences=True, return_state=True)
# Define the attention layer
attention_layer = Attention()
# Define the dense layer
dense = Dense(len(tokenizer.word_index)+1, activation='softmax')
# Define the encoder model
encoder_embedding = embedding(encoder_inputs)
encoder_outputs, state_h, state_c = encoder_lstm(encoder_embedding)
encoder_states = [state_h, state_c]
encoder_model = Model(encoder_inputs, encoder_states)
# Define the decoder model
decoder_embedding = embedding(decoder_inputs)
decoder_outputs, _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)
attention = attention_layer([decoder_outputs, encoder_outputs])
decoder_concat = tf.concat([decoder_outputs, attention], axis=-1)
decoder_outputs = dense(decoder_concat)
decoder_model = Model([decoder_inputs] + encoder_states, [decoder_outputs])
I have tried to check its arrangement but still not working.
Namely, this is the error I get:
ValueError:
Found input tensor cannot be reached given provided output tensors. Please make sure the tensor KerasTensor(type_spec=TensorSpec(shape=(None, 1000), dtype=tf.float32, name='input_13'), name='input_13', description="created by layer 'input_13'") is included in the model inputs when building functional model.`