I am trying to extract key-value pairs from scanned invoice documents using the LayoutLMv2 model, but I am getting an error. I just want to check how the model predicts key-value pairs from a document out of the box, or whether I need to fine-tune the model on my own data set first. Need help with this.
from transformers import LayoutLMv2FeatureExtractor, LayoutLMv2Processor, AutoTokenizer, LayoutLMv2ForRelationExtraction
import torch
from PIL import Image
import pytesseract

# Point pytesseract at the local Tesseract binary; the feature extractor
# calls it internally when apply_ocr=True.
pytesseract.pytesseract.tesseract_cmd = r'C:\Users\name\AppData\Local\Programs\Tesseract-OCR\tesseract.exe'
# path_1 is the directory of the pretrained LayoutLMv2 checkpoint
feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=True)
tokenizer = AutoTokenizer.from_pretrained(path_1, pad_token='<pad>')
processor = LayoutLMv2Processor(feature_extractor, tokenizer)
model = LayoutLMv2ForRelationExtraction.from_pretrained(path_1)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
image_file = 'image2.png'
image = Image.open(image_file).convert('RGB')

# Run OCR and build the model inputs (input_ids, token_type_ids, attention_mask, bbox, image)
encoded_inputs = processor(image, return_tensors="pt")
for k, v in encoded_inputs.items():
    print(k, v.shape)

# Move every input tensor to the model's device
for k, v in encoded_inputs.items():
    encoded_inputs[k] = v.to(model.device)

# forward pass
outputs = model(**encoded_inputs)
This is the error I am getting:
TypeError Traceback (most recent call last)
c:\Users\name\Parallel Project\Trans_LayoutXLM.ipynb Cell 7 in <cell line: 5>()
2 encoded_inputs[k] = v.to(model.device)
4 # forward pass
----> 5 outputs = model(**encoded_inputs)
File c:\Users\name\.conda\envs\layoutlmft\lib\site-packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
File c:\Users\name\.conda\envs\layoutlmft\lib\site-packages\transformers\models\layoutlmv2\modeling_layoutlmv2.py:1598, in LayoutLMv2ForRelationExtraction.forward(self, input_ids, bbox, labels, image, attention_mask, token_type_ids, position_ids, head_mask, entities, relations)
1596 sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
1597 sequence_output = self.dropout(sequence_output)
-> 1598 loss, pred_relations = self.extractor(sequence_output, entities, relations)
1600 return RegionExtractionOutput(
1601 loss=loss,
1602 entities=entities,
(...)
1605 hidden_states=outputs[0],
1606 )
File c:\Users\name\.conda\envs\layoutlmft\lib\site-packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
...
-> 1421 batch_size = len(relations)
1422 new_relations = []
1423 for b in range(batch_size):
TypeError: object of type 'NoneType' has no len()
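From the traceback, the crash happens inside the relation-extraction head at batch_size = len(relations). The processor only produces input_ids, token_type_ids, attention_mask, bbox, and image, while LayoutLMv2ForRelationExtraction.forward also takes entities and relations arguments that default to None, so the head receives relations=None and len(None) fails.

If I read the layoutlmft-style signature correctly, both arguments are lists with one dict per batch element: entities holds token-level spans with FUNSD-style labels, and relations holds the gold head/tail entity indices (which would be empty at inference time). Here is a minimal sketch of what I think the call should look like; the spans and labels are made-up placeholders, not real invoice annotations:

# Sketch only, assuming the layoutlmft input format;
# entity spans/labels below are invented for illustration.
entities = [{
    "start": [1, 5, 9],    # token index where each entity begins
    "end":   [4, 8, 12],   # token index where each entity ends
    "label": [1, 2, 2],    # FUNSD-style: 1 = question/key, 2 = answer/value
}]
relations = [{
    "head": [],            # gold key->value links as indices into entities
    "tail": [],            # left empty when only predicting
}]

outputs = model(**encoded_inputs, entities=entities, relations=relations)

From what I can tell, the entity spans are normally produced by a separate token-classification step first (e.g. LayoutLMv2ForTokenClassification fine-tuned on FUNSD), and the relation-extraction head is randomly initialized unless the checkpoint was itself fine-tuned for relation extraction, so I suspect the answer to my second question is that fine-tuning on annotated data is required. Can anyone confirm?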