
I am trying to do sequence detection on the MNIST dataset, and I would like to do this without RNNs. To do so, I horizontally stacked (up to) 5 images into a sequence and then ran the classification on it. However, it does not work that well: I am getting low accuracy.
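For context, a minimal sketch of one way to build such stacked inputs and one-hot labels (the flattened `images`/`labels` arrays, the `make_sequence` name, and the use of class 10 for empty positions are assumptions for illustration, not necessarily my exact code):

import numpy as np

def make_sequence(images, labels, max_len=5, blank_class=10):
    # Pick a random sequence length between 1 and max_len.
    seq_len = np.random.randint(1, max_len + 1)
    idx = np.random.randint(0, len(images), size=seq_len)

    # Start from an all-black 28x140 canvas and all-blank labels.
    canvas = np.zeros((28, 28 * max_len), dtype=np.float32)
    seq_labels = np.full(max_len, blank_class, dtype=np.int64)

    # Paste the 28x28 digits side by side and record their classes.
    for pos, i in enumerate(idx):
        canvas[:, pos * 28:(pos + 1) * 28] = images[i].reshape(28, 28)
        seq_labels[pos] = labels[i]

    # One-hot encode the 5 per-position labels over 11 classes.
    one_hot = np.eye(11, dtype=np.float32)[seq_labels]
    return canvas[..., np.newaxis], one_hot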

import tensorflow as tf

# Input: up to 5 MNIST digits stacked horizontally into one 28x140 image.
data = tf.placeholder(dtype=tf.float32, shape=(None, 28, 140, 1))
# One-hot labels over 11 classes for each of the 5 sequence positions.
tf_train_labels = tf.placeholder(dtype=tf.float32, shape=(None, 5, 11))

w1 = tf.Variable(tf.truncated_normal(shape=(3,3, 1,32), stddev=0.1))
b1 = tf.Variable(tf.zeros(32))

w2 = tf.Variable(tf.truncated_normal(shape=(3,3,32,64), stddev=0.1))
b2 = tf.Variable(tf.constant(1., shape=[64]))

w22 = tf.Variable(tf.truncated_normal(shape=(3,3,64,128), stddev=0.1))
b22 = tf.Variable(tf.constant(1., shape=[128]))



# Fully connected layer; the feature map is 28/4 x 140/4 x 128 after the two 2x2 max pools.
w3 = tf.Variable(tf.truncated_normal(shape=(28 // 4 * 140 // 4 * 128,1024)))
b3 = tf.Variable(tf.constant(1., shape=[1024]))

# Five independent 1024 -> 11 output heads, one per sequence position.
w4 = tf.Variable(tf.truncated_normal(shape=(1024,11), stddev=0.1))
b4 = tf.Variable(tf.constant(1., shape=[11]))

w5 = tf.Variable(tf.truncated_normal(shape=(1024,11), stddev=0.1))
b5 = tf.Variable(tf.constant(1., shape=[11]))

w6 = tf.Variable(tf.truncated_normal(shape=(1024,11), stddev=0.1))
b6 = tf.Variable(tf.constant(1., shape=[11]))

w7 = tf.Variable(tf.truncated_normal(shape=(1024,11), stddev=0.1))
b7 = tf.Variable(tf.constant(1., shape=[11]))

w8 = tf.Variable(tf.truncated_normal(shape=(1024,11), stddev=0.1))
b8 = tf.Variable(tf.constant(1., shape=[11]))



# Shared convolutional/dense trunk; w and b select one of the five output heads.
def model(x, w, b):
    conv = tf.nn.relu(tf.nn.conv2d(x, w1, [1,1,1,1], padding="SAME")+b1)
    conv = tf.nn.max_pool(conv, [1,2,2,1], [1,2,2,1], padding="SAME")
    conv = tf.nn.relu(tf.nn.conv2d(conv, w2, [1,1,1,1], padding="SAME")+b2)
    conv = tf.nn.max_pool(conv, [1,2,2,1], [1,2,2,1], padding="SAME")
    conv = tf.nn.relu(tf.nn.conv2d(conv, w22, [1,1,1,1], padding="SAME")+b22)

    shape = conv.get_shape().as_list()
    reshape = tf.reshape(conv, [-1, shape[1] * shape[2] * shape[3]])
    dense = tf.nn.relu(tf.matmul(reshape, w3)+b3)
    return tf.matmul(dense, w) + b

pred1 = model(data, w4, b4)
pred2 = model(data, w5, b5)
pred3 = model(data, w6, b6)
pred4 = model(data, w7, b7)
pred5 = model(data, w8, b8)

# Per-position softmax predictions, stacked to shape (5, batch, 11).
prediction = tf.stack([
        tf.nn.softmax(pred1),
        tf.nn.softmax(pred2),
        tf.nn.softmax(pred3),
        tf.nn.softmax(pred4),
        tf.nn.softmax(pred5)])


# Total loss: sum of the five per-position softmax cross-entropies.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
               logits = pred1, labels = tf_train_labels[:, 0])) + \
           tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
               logits = pred2, labels = tf_train_labels[:, 1])) + \
           tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
               logits = pred3, labels = tf_train_labels[:, 2])) + \
           tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
               logits = pred4, labels = tf_train_labels[:, 3])) + \
           tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
               logits = pred5, labels = tf_train_labels[:, 4]))


optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0001).minimize(loss)
init = tf.global_variables_initializer()
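
The training loop is the usual feed_dict loop; a minimal sketch (the `next_batch` helper, `num_steps`, and `batch_size` are placeholders, not my exact code):

num_steps = 10000
batch_size = 64

with tf.Session() as sess:
    sess.run(init)
    for step in range(num_steps):
        # next_batch is assumed to yield stacked images of shape
        # (batch, 28, 140, 1) and labels of shape (batch, 5, 11).
        batch_data, batch_labels = next_batch(batch_size)
        _, l = sess.run([optimizer, loss],
                        feed_dict={data: batch_data,
                                   tf_train_labels: batch_labels})
        if step % 100 == 0:
            print("step %d, loss %.4f" % (step, l))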

Is there any logical error in the code, or did I just not train long enough or choose the wrong model? Thank you and best regards

b4shyou
    Can you be more descriptive about your problem? When you say it doesn't work that well - are you encountering an error? Does it take a very long time to run? Are you receiving inaccurate output? – souldeux Dec 01 '18 at 14:52
  • I am receiving very inaccurate output – b4shyou Dec 02 '18 at 12:09

1 Answer


I solved the problem by switching from the GradientDescent optimizer to the Adam optimizer.
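
Concretely, the only change is the optimizer line; for example (the learning rate shown is Adam's default 0.001, not necessarily the value I ended up with):

optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

Adam adapts the step size per parameter, which typically copes better with badly scaled gradients than plain gradient descent with a single fixed learning rate.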

b4shyou