This is part of my code for a simple 1-D, two-layer perceptron, and I want to implement mini-batch training with a DataLoader. (The data, loss, and optimizer setup that is not pasted here is sketched right after the Net class below.)
import torch
import torch.nn as nn
import torch.utils.data as Data

class Net(nn.Module):  # subclass of nn.Module
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(1, 1, bias=False)  # (in_features, out_features), no bias

    def forward(self, x):
        x = self.fc1(x)
        return x
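The rest of the setup is created elsewhere in my script. Roughly, it looks like this (a minimal sketch with placeholder values, assuming X and Y are plain 1-D tensors of shape (N,)):

# Placeholder data and training objects, not my real values.
X = torch.linspace(-1.0, 1.0, 9)    # shape (9,)
Y = 2.0 * X                         # shape (9,)

net = Net()                                             # the model defined above
criterion = nn.MSELoss()                                # loss function
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)  # plain SGD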
torch_dataset = Data.TensorDataset(X, Y)

batch_size = 3
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=2,
)
if __name__ == '__main__':
    for epoch in range(20):  # epochs 0-19
        for i, current_data in enumerate(loader):
            X, Y = current_data           # one mini-batch of inputs and targets
            outputs = net(X)              # forward pass
            loss = criterion(outputs, Y)
            optimizer.zero_grad()         # clear old gradients
            loss.backward()               # lecture 4, 15 min
            optimizer.step()              # equivalent to "W = W - lr * W.grad"
        print("Epoch {} - loss: {}".format(epoch, loss))
But running it produces several errors, ending with this one:
RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x3 and 1x1)
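For reference, here is a minimal sketch that should hit the same shape mismatch, under my assumption that each mini-batch comes out of the loader as a 1-D tensor of shape (3,):

import torch
import torch.nn as nn

layer = nn.Linear(1, 1, bias=False)
batch = torch.rand(3)   # a 1-D batch of 3 values, shape (3,)
layer(batch)            # expected: RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x3 and 1x1)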
What am I doing wrong?