
I'm working on a linear regression problem with PyTorch (y = A*x, where A is a 2x2 matrix). I wrote the following code, but the loss doesn't change during training. Can someone help me?

Thanks,

Thomas

import torch
import torch.nn as nn
import numpy as np
from scipy.integrate import odeint
from matplotlib import pyplot as plt

# Right-hand side of the ODE dX/dt = -2*X, used to generate the training inputs
def EDP(X, t):
    X_0 = -2 * X[0]
    X_1 = -2 * X[1]
    grad = np.array([X_0, X_1])
    return grad
T = np.arange(0, 10, 0.1)
X_train = odeint(EDP, [10, 20], T)  # shape (100, 2): one row per time step

# Targets: Y = A @ X with A = [[2, 0], [0, 2]]
Y_train = np.zeros_like(X_train)
for i in range(Y_train.shape[0]):
    Y_train[i, :] = np.dot(np.array([[2, 0], [0, 2]]), X_train[i, :])
print(X_train, Y_train)

X_train = torch.Tensor(X_train)  # already (n_samples, 2), as nn.Linear expects
Y_train = torch.Tensor(Y_train)
print(X_train.shape)
class LinearRegression(torch.nn.Module):

    def __init__(self):
        super(LinearRegression, self).__init__()
        self.linear = torch.nn.Linear(2, 2, bias=False)  # bias defaults to True; disabled so the model is exactly y = A*x

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred
our_model = LinearRegression()  # the model must exist before its parameters are handed to the optimizer
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(our_model.parameters(), lr = 0.0001)
x_train = X_train
y_train = Y_train
#x_train.requires_grad=True
print(x_train.shape)
print(y_train.shape)
ntrain=10

for t in range(ntrain):

    y_pred = our_model(x_train)
    loss = criterion(y_pred, y_train)  # criterion(input, target)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    print(t, loss.item())
print(our_model.linear.weight)
spe2005
    *I'm working on a linear regression problem with Pytorch*, so next time please do not add irrelevant tags, such as `tensorflow`. – Frightera Mar 20 '21 at 17:47

1 Answer


It worked on my laptop. Since you are running it for only 10 epochs with lr = 0.0001, the updates are so small that you won't see the loss move in that few steps.

I used optimizer = torch.optim.SGD(our_model.parameters(), lr = 0.01) (an increased learning rate), which actually decreased the loss within just 10 epochs.
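For reference, here is a minimal sketch of the adjusted training setup. It assumes the LinearRegression model and the x_train/y_train tensors defined in your question; the only change is the learning rate:

# Assumes LinearRegression, x_train and y_train from the question above
our_model = LinearRegression()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(our_model.parameters(), lr=0.01)  # larger step than 0.0001

for t in range(10):
    y_pred = our_model(x_train)        # forward pass through the 2x2 linear layer
    loss = criterion(y_pred, y_train)  # mean squared error against the targets
    loss.backward()                    # compute gradients w.r.t. the weight matrix
    optimizer.step()                   # apply the SGD update
    optimizer.zero_grad()              # clear gradients for the next epoch
    print(t, loss.item())              # the loss now drops visibly per epoch

With the original lr = 0.0001 the per-step updates are about 100x smaller, so you would need on the order of hundreds or thousands of epochs before the printed loss changes noticeably.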

Prajot Kuvalekar