
https://github.com/cmgreen210/TensorFlowDeepAutoencoder

I'm trying to save and restore the model after the fine-tuning step. I restore the model and then try to get the variables back from it, but I get this error:

ValueError: Variable autoencoder_variables/weights1 does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?

If I set reuse to False instead, it just creates new variables for the weights and biases.

Here's my code to restore the model:

def do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
            data_set):
  true_count = 0  # Counts the number of correct predictions.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in range(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  # Report precision once the whole epoch has been evaluated.
  precision = true_count / num_examples
  print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))

def evaluation(logits, labels):
  # Standard top-1 check (as in the TensorFlow MNIST tutorial this repo
  # follows): True where the highest logit matches the label.
  correct = tf.nn.in_top_k(logits, labels, 1)
  return tf.reduce_sum(tf.cast(correct, tf.int32))

def test_nets(self):
  data = read_data_sets(FLAGS.data_dir)
  ckpt = tf.train.get_checkpoint_state("model_sps_2017-08-29_11:45:25")

  sess = tf.InteractiveSession()
  saver = tf.train.import_meta_graph('model_sps_2017-08-29_11:45:25/model.meta')
  saver.restore(sess, ckpt.model_checkpoint_path)

  with sess.as_default():
    ae_shape = [784, 2000, 2000, 2000, 10]
    ae = AutoEncoder(ae_shape, sess)

    input_pl = tf.placeholder(tf.float32,
                              shape=(FLAGS.batch_size, FLAGS.image_pixels),
                              name='input_pl')
    sup_net = ae.supervised_net(input_pl)
    labels_placeholder = tf.placeholder(tf.int32,
                                        shape=FLAGS.batch_size,
                                        name='target_pl')
    eval_correct = evaluation(sup_net, labels_placeholder)
    do_eval(sess,
            eval_correct,
            input_pl,
            labels_placeholder,
            data.test)
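
For context: tf.train.import_meta_graph rebuilds the saved graph and saver.restore fills in the trained values, but tf.get_variable(..., reuse=True) only returns variables that were registered through tf.get_variable() in the current process, and that registry is not stored in the meta graph. A minimal sketch (checkpoint path and variable name assumed from the code above) of reading a restored weight straight from the graph instead:

import tensorflow as tf

sess = tf.InteractiveSession()
saver = tf.train.import_meta_graph('model_sps_2017-08-29_11:45:25/model.meta')
saver.restore(sess, tf.train.latest_checkpoint('model_sps_2017-08-29_11:45:25'))

# The restored graph already contains the variable; fetch it by its
# tensor name rather than re-declaring it with tf.get_variable().
weights1 = sess.graph.get_tensor_by_name('autoencoder_variables/weights1:0')
print(sess.run(weights1).shape)  # expected: (784, 2000)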

This is the code for how I train and save the supervised model:

def main_supervised(ae):

  with ae.session.graph.as_default():
    saver = tf.train.Saver()
    sess = ae.session
    input_pl = tf.placeholder(tf.float32, shape=(FLAGS.batch_size,
                                                 FLAGS.image_pixels),
                              name='input_pl')
    logits = ae.supervised_net(input_pl)

    data = read_data_sets(FLAGS.data_dir)
    num_train = data.train.num_examples

    labels_placeholder = tf.placeholder(tf.int32,
                                        shape=FLAGS.batch_size,
                                        name='target_pl')

    loss = loss_supervised(logits, labels_placeholder)
    train_op, global_step = training(loss, FLAGS.supervised_learning_rate)
    eval_correct = evaluation(logits, labels_placeholder)

    hist_summaries = [ae['biases{0}'.format(i + 1)]
                      for i in range(ae.num_hidden_layers + 1)]
    hist_summaries.extend([ae['weights{0}'.format(i + 1)]
                           for i in range(ae.num_hidden_layers + 1)])

    hist_summaries = [tf.summary.histogram(v.op.name + "_fine_tuning", v)
                      for v in hist_summaries]
    summary_op = tf.summary.merge(hist_summaries)

    summary_writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph_def)
    # tf.train.SummaryWriter(pjoin(FLAGS.summary_dir,
    #                                               'fine_tuning'),
    #                                         graph_def=sess.graph_def,
    #                                         flush_secs=FLAGS.flush_secs)

    vars_to_init = ae.get_variables_to_init(ae.num_hidden_layers + 1)
    vars_to_init.append(global_step)
    #sess.run(tf.initialize_variables(vars_to_init))
    init = tf.initialize_all_variables() 
    sess.run(init)
    steps = num_train // FLAGS.batch_size
    for k in range(1):
      for step in range(1):
        start_time = time.time()

        feed_dict = fill_feed_dict(data.train,
                                   input_pl,
                                   labels_placeholder)

        _, loss_value = sess.run([train_op, loss],
                                 feed_dict=feed_dict)

        duration = time.time() - start_time

        # Write the summaries and print an overview fairly often.
        if step % 1 == 0:
          # Print status to stdout.
          print('Step %d/%d: loss = %.2f (%.3f sec)' % (step, steps, loss_value, duration))
          # Update the events file.

          summary_str = sess.run(summary_op, feed_dict=feed_dict)
          summary_writer.add_summary(summary_str, step)
          # summary_img_str = sess.run(
          #     tf.summary.image("training_images",
          #                      tf.reshape(input_pl,
          #                                 (FLAGS.batch_size,
          #                                  FLAGS.image_size,
          #                                  FLAGS.image_size, 1)),
          #                      max_outputs=10),
          #     feed_dict=feed_dict
          # )
          # summary_writer.add_summary(summary_img_str)

        if (step + 1) % 1000 == 0 or (step + 1) == steps:
          train_sum = do_eval_summary("training_error",
                                      sess,
                                      eval_correct,
                                      input_pl,
                                      labels_placeholder,
                                      data.train)

          val_sum = do_eval_summary("validation_error",
                                    sess,
                                    eval_correct,
                                    input_pl,
                                    labels_placeholder,
                                    data.validation)

          test_sum = do_eval_summary("test_error",
                                     sess,
                                     eval_correct,
                                     input_pl,
                                     labels_placeholder,
                                     data.test)

          summary_writer.add_summary(train_sum, step)
          summary_writer.add_summary(val_sum, step)
          summary_writer.add_summary(test_sum, step)

    folder = "model_sps_"+str(strftime("%Y-%m-%d_%H:%M:%S", gmtime()))
    os.mkdir(folder)
    folder += "/model"
    saver.save(sess, folder)
    do_eval(sess,
        eval_correct,
        input_pl,
        labels_placeholder,
        data.test)

This is how I set up the variables and restore them one by one in the autoencoder_variables scope:

def _restore_variables(self):
    # Debugging probe: the variable does exist in the graph's collection,
    # e.g. tf.get_collection(tf.GraphKeys.VARIABLES, scope='autoencoder_variables')
    # lists <tf.Variable 'autoencoder_variables/weights1:0' shape=(784, 2000)>.
    with tf.variable_scope("autoencoder_variables", reuse=True) as scope1:
      for i in range(self.__num_hidden_layers + 1):
        # Train weights
        name_w = self._weights_str.format(i + 1)
        w_shape = (self.__shape[i], self.__shape[i + 1])
        self[name_w] = tf.get_variable(name_w, w_shape, trainable=False)

        # Train biases
        name_b = self._biases_str.format(i + 1)
        b_shape = (self.__shape[i + 1],)
        self[name_b] = tf.get_variable(name_b, b_shape)

        if i < self.__num_hidden_layers:
          # Hidden layer fixed weights (after pretraining, before fine-tuning)
          self[name_w + "_fixed"] = tf.get_variable(name_w + "_fixed", w_shape)

          # Hidden layer fixed biases
          self[name_b + "_fixed"] = tf.get_variable(name_b + "_fixed", b_shape)

          # Pretraining output training biases
          name_b_out = self._biases_str.format(i + 1) + "_out"
          b_shape = (self.__shape[i],)
          self[name_b_out] = tf.get_variable(name_b_out, b_shape)
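
The underlying issue: tf.variable_scope("autoencoder_variables", reuse=True) can only hand back variables that were originally created with tf.get_variable() inside that scope in the same process, and the repo's _setup_variables() creates them with tf.Variable(), which never registers them with the variable store. A minimal repro sketch (shape taken from the code above):

import tensorflow as tf

with tf.variable_scope('autoencoder_variables'):
  # Created with tf.Variable(), as in the repo's _setup_variables(): the
  # variable exists in the graph but is invisible to tf.get_variable().
  w = tf.Variable(tf.zeros((784, 2000)), name='weights1')

with tf.variable_scope('autoencoder_variables', reuse=True):
  # Raises: ValueError: Variable autoencoder_variables/weights1 does not
  # exist, or was not created with tf.get_variable() ...
  w_again = tf.get_variable('weights1', (784, 2000))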

Traceback of the error:

File "run.py", line 47, in <module>
    main()
  File "run.py", line 40, in main
    test.test_nets_1()
  File "C:\Users\simjs\Downloads\TensorFlowDeepAutoencoder-master\TensorFlowDeepAutoencoder-master\code\ae\autoencoder_test.py", line 55, in test_nets_1
    ae = AutoEncoder(ae_shape, sess)
  File "C:\Users\simjs\Downloads\TensorFlowDeepAutoencoder-master\TensorFlowDeepAutoencoder-master\code\ae\autoencoder.py", line 41, in __init__
    self._restore_variables()
  File "C:\Users\simjs\Downloads\TensorFlowDeepAutoencoder-master\TensorFlowDeepAutoencoder-master\code\ae\autoencoder.py", line 98, in _restore_variables
    self[name_w] = tf.get_variable(name_w,w_shape)
  File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 1065, in get_variable
    use_resource=use_resource, custom_getter=custom_getter)
  File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 962, in get_variable
    use_resource=use_resource, custom_getter=custom_getter)
  File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 367, in get_variable
    validate_shape=validate_shape, use_resource=use_resource)
  File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 352, in _true_getter
    use_resource=use_resource)
  File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 682, in _get_single_variable
    "VarScope?" % name)
ValueError: Variable autoencoder_variables/weights1 does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?

1 Answer

  1. Check which variables are not initialized by writing print(sess.run(tf.report_uninitialized_variables())) after the restoring line (see the sketch after this list).

  2. Also try sess.run(tf.global_variables_initializer()) before restoring and analyse your results.
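
A minimal sketch of the point-1 diagnostic (checkpoint path taken from the question):

import tensorflow as tf

sess = tf.Session()
saver = tf.train.import_meta_graph('model_sps_2017-08-29_11:45:25/model.meta')
saver.restore(sess, tf.train.latest_checkpoint('model_sps_2017-08-29_11:45:25'))

# Prints the names of any variables restore() did not give a value to;
# an empty array means everything came from the checkpoint.
print(sess.run(tf.report_uninitialized_variables()))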

– prometeu
  • I did that, and during the restore I found that the AutoEncoder class constructor recreates the weight and bias variables in its call to self._setup_variables(). I think the weight and bias variables from the trained model should be initialized and associated with the autoencoder's keys. Do you have any idea how to do this? ae[weight_n], with weight_n taken from the model? I commented out self._setup_variables() and it gave /autoencoder.py", line 120, in _w return self[self._weights_str.format(n) + suffix] – Simranjeet Singh Aug 31 '17 at 12:48
  • , line 68, in __getitem__ return self.__variables[item] KeyError: 'weights1' – Simranjeet Singh Aug 31 '17 at 12:59
  • Can you update your question to include your comment and whatever else you think of? To use the same variables as in `_setup_variables`, before calling it you can use `with tf.name_scope('autoencoder_variables', reuse=True)`; no new variables will be created and the old ones will be reused. – prometeu Aug 31 '17 at 14:53
  • Thanks a lot for your solution, I hope it works like a charm now. I'll update the question and the comment above soon. – Simranjeet Singh Aug 31 '17 at 16:09
  • I tried that, and now it gives this error: ValueError: Variable autoencoder_variables/weights1 does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope? – Simranjeet Singh Sep 01 '17 at 10:03
  • Where do you use `_restore_variables()`? Can you copy the traceback of the error? – prometeu Sep 01 '17 at 19:02
  • Inside the autoencoder class I copied _setup_variables() and changed tf.Variable to tf.get_variable. – Simranjeet Singh Sep 02 '17 at 03:09
  • I replaced tf.get_variable() with tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='autoencoder_variables/'+var_name)[0] to get the variable directly from the graph, removed tf.global_variables_initializer(), and kept the graph and session restoration; now it works perfectly. Thanks anyway. – Simranjeet Singh Sep 02 '17 at 04:35
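
For reference, a sketch of how the _restore_variables() loop reads with the fix from the last comment, looking each variable up in the restored graph instead of re-creating it (the from_graph helper is a hypothetical wrapper around that one-liner):

def _restore_variables(self):
    # Fetch each trained variable from the restored graph's collections
    # instead of calling tf.get_variable() under reuse=True.
    def from_graph(var_name):  # hypothetical helper wrapping the fix
      return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                               scope='autoencoder_variables/' + var_name)[0]

    for i in range(self.__num_hidden_layers + 1):
      name_w = self._weights_str.format(i + 1)
      self[name_w] = from_graph(name_w)

      name_b = self._biases_str.format(i + 1)
      self[name_b] = from_graph(name_b)

      if i < self.__num_hidden_layers:
        self[name_w + "_fixed"] = from_graph(name_w + "_fixed")
        self[name_b + "_fixed"] = from_graph(name_b + "_fixed")
        self[name_b + "_out"] = from_graph(name_b + "_out")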