import torch
import torch.nn as nn
from torch.optim import Adam
class NN_Network(nn.Module):
    """Two-layer fully-connected network mapping in_dim -> hid -> out_dim.

    Note: nn.Linear internally registers its ``weight`` and ``bias`` as
    ``nn.Parameter`` objects, so ``net.parameters()`` lists them even
    though this class never uses ``nn.Parameter`` directly. Use
    ``net.named_parameters()`` to see which tensor belongs to which layer.
    """

    def __init__(self, in_dim, hid, out_dim):
        """Build the two linear layers.

        Args:
            in_dim: size of each input sample.
            hid: size of the hidden layer.
            out_dim: size of each output sample.
        """
        super().__init__()
        self.linear1 = nn.Linear(in_dim, hid)
        self.linear2 = nn.Linear(hid, out_dim)

    def forward(self, input_array):
        """Apply both linear layers (no nonlinearity between them).

        Args:
            input_array: tensor of shape (..., in_dim).

        Returns:
            Tensor of shape (..., out_dim).
        """
        h = self.linear1(input_array)
        y_pred = self.linear2(h)
        return y_pred
# Network dimensions: 5 inputs, a hidden width of 2, 3 outputs.
in_d, hidn, out_d = 5, 2, 3

# Instantiate the model and materialize its registered parameters
# (nn.Linear registers weight/bias automatically, so this is non-empty).
net = NN_Network(in_d, hidn, out_d)
list(net.parameters())
The result was :
[Parameter containing:
tensor([[-0.2948, -0.1261, 0.2525, -0.4162, 0.3067],
[-0.2483, -0.3600, -0.4090, 0.0844, -0.2772]], requires_grad=True),
Parameter containing:
tensor([-0.2570, -0.3754], requires_grad=True),
Parameter containing:
tensor([[ 0.4550, -0.4577],
[ 0.1782, 0.2454],
[ 0.6931, -0.6003]], requires_grad=True),
Parameter containing:
tensor([ 0.4181, -0.2229, -0.5921], requires_grad=True)]
Even though I never used nn.Parameter explicitly, list(net.parameters()) still returns the network's parameters.
What I am curious about is this:
I never called nn.Parameter myself, so why does .parameters() return anything? And is .parameters() the only way to inspect a network's layer parameters?
I suspect the tensors in the output are the weight and bias of self.linear1 = nn.Linear(in_dim, hid) and of self.linear2, respectively.
But is there a way to confirm which tensor belongs to which layer?