Consider the following network:
%%time
import torch
from torch.autograd import grad
import torch.nn as nn
import torch.optim as optim
class net_x(nn.Module):
    def __init__(self):
        super(net_x, self).__init__()
        self.fc1 = nn.Linear(1, 20)
        self.fc2 = nn.Linear(20, 20)
        self.out = nn.Linear(20, 400)  # a, b, c, d

    def forward(self, x):
        x = torch.tanh(self.fc1(x))
        x = torch.tanh(self.fc2(x))
        x = self.out(x)
        return x
nx = net_x()
#input
val = 100
t = torch.rand(val, requires_grad=True)  # input vector
t = torch.reshape(t, (val, 1))  # reshape for batch
#method
dx = torch.autograd.functional.jacobian(lambda t_: nx(t_), t)
This outputs:
CPU times: user 11.1 s, sys: 3.52 ms, total: 11.1 s
Wall time: 11.1 s
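For reference, %%time measures the whole cell, imports and model construction included. A minimal sketch that isolates just the Jacobian call (using time.perf_counter, with nx and t as defined above):
import time

start = time.perf_counter()
dx = torch.autograd.functional.jacobian(lambda t_: nx(t_), t)
print(f"jacobian call: {time.perf_counter() - start:.2f} s")  # excludes imports and model setup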
However, when I switch to running on the GPU with .to(device):
%%time
import torch
from torch.autograd import grad
import torch.nn as nn
import torch.optim as optim
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class net_x(nn.Module):
    def __init__(self):
        super(net_x, self).__init__()
        self.fc1 = nn.Linear(1, 20)
        self.fc2 = nn.Linear(20, 20)
        self.out = nn.Linear(20, 400)  # a, b, c, d

    def forward(self, x):
        x = torch.tanh(self.fc1(x))
        x = torch.tanh(self.fc2(x))
        x = self.out(x)
        return x
nx = net_x()
nx.to(device)
#input
val = 100
t = torch.rand(val, requires_grad=True)  # input vector
t = torch.reshape(t, (val, 1)).to(device)  # reshape for batch and move to GPU
#method
dx = torch.autograd.functional.jacobian(lambda t_: nx(t_), t)
This outputs:
CPU times: user 18.6 s, sys: 1.5 s, total: 20.1 s
Wall time: 19.5 s
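One caveat with timing GPU code: CUDA kernels launch asynchronously, so a fair measurement should call torch.cuda.synchronize() before reading the clock. A minimal sketch, reusing nx and t from the cell above:
import time

torch.cuda.synchronize()  # wait for any pending kernels before starting the clock
start = time.perf_counter()
dx = torch.autograd.functional.jacobian(lambda t_: nx(t_), t)
torch.cuda.synchronize()  # wait for the Jacobian kernels to actually finish
print(f"jacobian on GPU: {time.perf_counter() - start:.2f} s")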
Update 1: Timing just the step of moving the model and input to the device:
%%time
nx.to(device)
t.to(device)
This outputs:
CPU times: user 2.05 ms, sys: 0 ns, total: 2.05 ms
Wall time: 2.13 ms
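A caveat on this check: since nx and t were already moved to the GPU in the earlier cell, both calls here are likely near no-ops, which would help explain the very small time. Also, Tensor.to is out-of-place, so t.to(device) returns a new tensor without moving t itself (nn.Module.to, by contrast, moves the parameters in place); keeping the input on the GPU requires reassignment:
t = t.to(device)   # Tensor.to returns a new tensor; without reassignment t stays where it was
print(t.device)    # cuda:0 when a GPU is available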
Update 2: Looks like @Gulzar was right. I changed the batch size to 1000 (val=1000) and the CPU outputs:
Wall time: 8min 44s
While the GPU outputs:
Wall time: 3min 12s
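For what it's worth, by default torch.autograd.functional.jacobian computes the Jacobian with one backward pass per output element (40,000 for this output shape of (100, 400)), which is where most of the time goes. In recent PyTorch versions it also accepts an experimental vectorize=True flag that batches those backward passes; a hedged sketch, worth sanity-checking against the default:
# vectorize=True batches the per-output backward passes; it is flagged
# experimental, so compare the result against the default before relying on it
dx_vec = torch.autograd.functional.jacobian(lambda t_: nx(t_), t, vectorize=True)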