I'm trying to get a better grasp of the `scan` functionality in Theano. My understanding, based on this document, is that `scan` behaves like a `for` loop. I've created a very simple working example that finds the weight and bias when performing linear regression.
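To check that mental model, here is a toy running sum built with `scan` that works for me as expected (the `element` and `running_total` names are just my own):

import numpy as np
import theano
import theano.tensor as T

v = T.vector(name='v', dtype=theano.config.floatX)
# scan applies the step function to each element of v,
# carrying the running total forward between iterations
results, updates = theano.scan(fn=lambda element, running_total: running_total + element,
                               sequences=v,
                               outputs_info=T.zeros_like(v[0]))
cumulative_sum = theano.function(inputs=[v], outputs=results)
print(cumulative_sum(np.arange(5, dtype=theano.config.floatX)))  # [ 0.  1.  3.  6. 10.]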
#### Libraries
# Third Party Libraries
import numpy as np
import theano
import theano.tensor as T
# Not intended for mini-batch: generate noisy points along a line
def gen_data(num_points=50, slope=1, bias=10, x_max=50):
    f = lambda z: slope * z + bias
    x = np.zeros(shape=(num_points,), dtype=theano.config.floatX)
    y = np.zeros(shape=(num_points,), dtype=theano.config.floatX)
    for i in range(num_points):
        x_temp = np.random.uniform() * x_max
        x[i] = x_temp
        y[i] = f(x_temp) + np.random.normal(scale=3.0)
    return (x, y)
#############################################################
#############################################################
train_x, train_y = gen_data(num_points=50, slope=2, bias=5)
epochs = 50
# Declaring symbolic input variables
learn_rate = T.scalar(name='learn_rate', dtype=theano.config.floatX)
x = T.vector(name='x', dtype=theano.config.floatX)
y = T.vector(name='y', dtype=theano.config.floatX)
# Variables that will be updated
theta = theano.shared(np.random.rand(), name='theta')
bias = theano.shared(np.random.rand(), name='bias')
hyp = T.dot(theta, x) + bias
cost = T.mean((hyp - y)**2) / 2  # half mean squared error
f_cost = theano.function(inputs=[x, y], outputs=cost)  # not used below; handy for inspecting the cost
grad_t, grad_b = T.grad(cost, [theta, bias])
train = theano.function(inputs=[x, y, learn_rate], outputs=cost,
                        updates=((theta, theta - learn_rate*grad_t),
                                 (bias, bias - learn_rate*grad_b)))
print('weight: {}, bias: {}'.format(theta.get_value(), bias.get_value()))
for i in range(epochs):  # Try changing this to a `scan`
    train(train_x, train_y, 0.001)
print('------------------------------')
print('weight: {}, bias: {}'.format(theta.get_value(), bias.get_value()))
I would like to change that `for` loop to a `theano.scan` call, but every attempt I've made has yielded one error message after the next.
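For reference, this is the shape of what I've been attempting: I'm assuming the parameters can be threaded through `outputs_info` and the data passed in via `non_sequences`, and the `step` helper and its variable names are my own guesses:

def step(theta_prev, bias_prev, x_, y_, lr):
    # One gradient-descent update: mirrors the body of the for loop above
    hyp_ = T.dot(theta_prev, x_) + bias_prev
    cost_ = T.mean((hyp_ - y_)**2) / 2
    g_t, g_b = T.grad(cost_, [theta_prev, bias_prev])
    return theta_prev - lr*g_t, bias_prev - lr*g_b

# With no `sequences`, scan just repeats `step` for n_steps iterations
(thetas, biases), scan_updates = theano.scan(fn=step,
                                             outputs_info=[theta, bias],
                                             non_sequences=[x, y, learn_rate],
                                             n_steps=epochs)
# scan_updates should be empty here since step touches no shared state;
# write the final parameter values back to the shared variables instead
scan_train = theano.function(inputs=[x, y, learn_rate],
                             outputs=[thetas[-1], biases[-1]],
                             updates=[(theta, thetas[-1]), (bias, biases[-1])])
scan_train(train_x, train_y, 0.001)

Is this roughly the right structure, or am I misunderstanding how `outputs_info` carries state between iterations?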