The problem is that I am unable to find seq2seq in the new TensorFlow library.
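Checking in a Python shell on my machine (TensorFlow 2.x), the attribute really is gone, and as far as I can tell it is not under tf.compat.v1 either:

import tensorflow as tf
print(tf.__version__)                           # 2.x on my machine
print(hasattr(tf, 'legacy_seq2seq'))            # False
print(hasattr(tf.compat.v1, 'legacy_seq2seq'))  # also False, as far as I can tell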
Here's the code:
decoderOutputs, states = tf.legacy_seq2seq.embedding_rnn_seq2seq(
    self.encoderInputs,  # List<[batch=?, inputDim=1]>, list of size args.maxLength
    self.decoderInputs,  # For training, we force the correct output (feed_previous=False)
    encoDecoCell,
    self.textData.getVocabularySize(),
    self.textData.getVocabularySize(),  # Both encoder and decoder have the same number of classes
    embedding_size=self.args.embeddingSize,  # Dimension of each word
    output_projection=outputProjection.getWeights() if outputProjection else None,
    feed_previous=bool(self.args.test)  # When we test (self.args.test), we use the previous output as the next input (feed_previous)
)
# Other solution to speed up training and reduce memory usage: use sampled softmax
# For testing only
if self.args.test:
    if not outputProjection:
        self.outputs = decoderOutputs
    else:
        self.outputs = [outputProjection(output) for output in decoderOutputs]
# For training only
else:
    # Finally, we define the loss function
    self.lossFct = tf.compat.v1.legacy_seq2seq.sequence_loss(
        decoderOutputs,
        self.decoderTargets,
        self.decoderWeights,
        self.textData.getVocabularySize(),
        softmax_loss_function=sampledSoftmax if outputProjection else None  # If None, use default SoftMax
    )
    tf.summary.scalar('loss', self.lossFct)  # Keep track of the cost

    # Initialize the optimizer
    opt = tf.train.AdamOptimizer(
        learning_rate=self.args.learningRate,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-08
    )
    self.optOp = opt.minimize(self.lossFct)
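Side note: I suspect tf.train.AdamOptimizer above will hit the same kind of AttributeError on TensorFlow 2.x. As far as I can tell the TF1-style optimizer is still reachable through the compat module; this minimal check runs for me (the learning rate is just a placeholder value, not from the repo):

import tensorflow as tf
# My assumption: the TF1-style optimizer still exists under tf.compat.v1
opt = tf.compat.v1.train.AdamOptimizer(learning_rate=0.002)
print(opt)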
def step(self, batch):
    """ Forward/training step operation.
    Does not run the session itself; it just returns the operators to run, which the caller then has to execute.
    Args:
        batch (Batch): input data in testing mode; inputs and targets in training mode
    Return:
        (ops), dict: a tuple of the (training, loss) operators, or (outputs,) in testing mode, with the associated feed dictionary
    """
    # Build the feed dictionary
    feedDict = {}
    ops = None
    if not self.args.test:  # Training
        for i in range(self.args.maxLengthEnco):
            feedDict[self.encoderInputs[i]] = batch.encoderSeqs[i]
        for i in range(self.args.maxLengthDeco):
            feedDict[self.decoderInputs[i]] = batch.decoderSeqs[i]
            feedDict[self.decoderTargets[i]] = batch.targetSeqs[i]
            feedDict[self.decoderWeights[i]] = batch.weights[i]
        ops = (self.optOp, self.lossFct)
    else:  # Testing (batchSize == 1)
        for i in range(self.args.maxLengthEnco):
            feedDict[self.encoderInputs[i]] = batch.encoderSeqs[i]
        feedDict[self.decoderInputs[0]] = [self.textData.goToken]
        ops = (self.outputs,)
    # Return one pass operator
    return ops, feedDict
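What I have tried so far: from searching around, seq2seq seems to have moved out of core TensorFlow into the separate TensorFlow Addons package (tfa.seq2seq), but I don't see a drop-in replacement for embedding_rnn_seq2seq there. For example, this sequence_loss call runs for me (assuming pip install tensorflow-addons; the shapes are dummy values just for the check):

import tensorflow as tf
import tensorflow_addons as tfa  # assuming: pip install tensorflow-addons

# Dummy shapes just to check the call: batch=2, time=5, vocab=10
logits = tf.random.normal([2, 5, 10])
targets = tf.zeros([2, 5], dtype=tf.int32)
weights = tf.ones([2, 5])
print(tfa.seq2seq.sequence_loss(logits, targets, weights))

But I can't tell how to map the embedding_rnn_seq2seq call above onto this API.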
The error shown to me:

Traceback (most recent call last):
  File "C:\Users\krith\Downloads\Personality\Personality-Chatbot\main.py", line 29, in <module>
    chatbot.main()
  File "C:\Users\krith\Downloads\Personality\Personality-Chatbot\chatbot\chatbot.py", line 154, in main
    self.model = Model(self.args, self.textData)
  File "C:\Users\krith\Downloads\Personality\Personality-Chatbot\chatbot\model.py", line 105, in __init__
    self.buildNetwork()
  File "C:\Users\krith\Downloads\Personality\Personality-Chatbot\chatbot\model.py", line 167, in buildNetwork
    decoderOutputs, states = tf.legacy_seq2seq.embedding_rnn_seq2seq(
AttributeError: module 'tensorflow' has no attribute 'legacy_seq2seq'

How do I call embedding_rnn_seq2seq (or an equivalent) in current TensorFlow versions?