I have implemented the DataParallel technique to utilize multiple GPUs on a single machine, based on the doctr recognition training script linked below, but I am getting an error in the fit function.
https://github.com/mindee/doctr/blob/main/references/recognition/train_pytorch.py
from fastprogress.fastprogress import master_bar, progress_bar
In the fit_one_epoch function:
for images, targets in progress_bar(train_loader, parent=mb):
images = images.to(device)
targets = targets.to(device)
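For context, this is what a single batch from the recognition loader looks like as far as I can tell (a sketch using the train_loader from the script; the label type is my assumption based on the error further down):

    # images  -> torch.Tensor of shape (batch_size, 3, H, W)
    # targets -> plain Python list of label strings, e.g. ["word1", "word2", ...]
    images, targets = next(iter(train_loader))
    print(type(images))   # torch.Tensor
    print(type(targets))  # list -- a list has no .to() method, hence the crash below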
In the main function:
model = model.to(device)
if device == 'cuda':
model = nn.DataParallel(model)
# model = model.to(device)
cudnn.benchmark = True
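For completeness, this is the full device setup I intend (a minimal sketch with the imports spelled out and the args handling omitted; the placeholder model stands in for the doctr recognition model):

    import torch
    import torch.nn as nn
    import torch.backends.cudnn as cudnn

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = nn.Linear(8, 8)  # placeholder; the real model comes from doctr
    model = model.to(device)
    if device == "cuda":
        # nn.DataParallel replicates the module on every visible GPU and
        # scatters each input batch along dim 0 across the replicas
        model = nn.DataParallel(model)
        cudnn.benchmark = True  # let cuDNN pick the fastest algorithms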
Traceback
Traceback (most recent call last):
File "/home2/coremax/Documents/doctr/references/recognition/DP_KR.py", line 481, in <module>
main(args)
File "/home2/coremax/Documents/doctr/references/recognition/DP_KR.py", line 390, in main
fit_one_epoch(model, train_loader, batch_transforms, optimizer, scheduler, mb, amp=args.amp)
File "/home2/coremax/Documents/doctr/references/recognition/DP_KR.py", line 122, in fit_one_epoch
targets = targets.to(device)
AttributeError: 'list' object has no attribute 'to'
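From the traceback, targets is a plain Python list (the recognition labels are strings), so it has no .to() method. Is the right fix simply to keep the targets on the host and only move the image tensor? A minimal sketch of what I have in mind, assuming the model computes its loss from the raw string targets as I believe the linked script does:

    for images, targets in progress_bar(train_loader, parent=mb):
        images = images.to(device)
        # targets is a list of label strings, not a tensor, so it cannot be moved;
        # guard the transfer in case a tensor target ever shows up
        if torch.is_tensor(targets):
            targets = targets.to(device)
        images = batch_transforms(images)
        # pass the (still host-side) targets straight to the model
        train_loss = model(images, targets)["loss"]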