
I am trying to extract key-value pairs from scanned invoice documents using the LayoutLMv2 model, but I am getting an error (I set everything up following the installation guide). I just want to check how well the pretrained model predicts key-value pairs from a document, or whether I need to fine-tune it on my own data set. Need help with this.

from transformers import PreTrainedTokenizerBase, LayoutLMv2FeatureExtractor, LayoutLMv2Processor, AutoTokenizer, LayoutLMv2ForRelationExtraction, AdamW
from transformers.file_utils import PaddingStrategy
import torch
from torch import nn
from dataclasses import dataclass
from typing import Dict, Tuple, Optional, Union
from datasets import load_dataset
from torch.utils.data import DataLoader
from torchvision.transforms import ToPILImage
from torchvision import transforms
from PIL import Image, ImageFont, ImageDraw, ImageEnhance, ImageFilter
import numpy as np
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Users\name\AppData\Local\Programs\Tesseract-OCR\tesseract.exe'

feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=True)
tokenizer = AutoTokenizer.from_pretrained(path_1, pad_token='<pad>')
processor = LayoutLMv2Processor(feature_extractor, tokenizer)
model = LayoutLMv2ForRelationExtraction.from_pretrained(path_1)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
image_file = 'image2.png'
image = Image.open(image_file).convert('RGB')
image.size
encoded_inputs = processor(image, return_tensors="pt")
encoded_inputs.keys()
for k,v in encoded_inputs.items():
  print(k, v.shape)
for k,v in encoded_inputs.items():
  encoded_inputs[k] = v.to(model.device)

# forward pass
outputs = model(**encoded_inputs)

This is the error I am getting:

    TypeError                                 Traceback (most recent call last)
c:\Users\name\Parallel Project\Trans_LayoutXLM.ipynb Cell 7 in <cell line: 5>()
      2   encoded_inputs[k] = v.to(model.device)
      4 # forward pass
----> 5 outputs = model(**encoded_inputs)

File c:\Users\name\.conda\envs\layoutlmft\lib\site-packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
   1126 # If we don't have any hooks, we want to skip the rest of the logic in
   1127 # this function, and just call forward.
   1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1129         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130     return forward_call(*input, **kwargs)
   1131 # Do not call functions when jit is used
   1132 full_backward_hooks, non_full_backward_hooks = [], []

File c:\Users\name\.conda\envs\layoutlmft\lib\site-packages\transformers\models\layoutlmv2\modeling_layoutlmv2.py:1598, in LayoutLMv2ForRelationExtraction.forward(self, input_ids, bbox, labels, image, attention_mask, token_type_ids, position_ids, head_mask, entities, relations)
   1596 sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
   1597 sequence_output = self.dropout(sequence_output)
-> 1598 loss, pred_relations = self.extractor(sequence_output, entities, relations)
   1600 return RegionExtractionOutput(
   1601     loss=loss,
   1602     entities=entities,
   (...)
   1605     hidden_states=outputs[0],
   1606 )

File c:\Users\name\.conda\envs\layoutlmft\lib\site-packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
   1126 # If we don't have any hooks, we want to skip the rest of the logic in
   1127 # this function, and just call forward.
   1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1129         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130     return forward_call(*input, **kwargs)
   1131 # Do not call functions when jit is used
   1132 full_backward_hooks, non_full_backward_hooks = [], []
...
-> 1421     batch_size = len(relations)
   1422     new_relations = []
   1423     for b in range(batch_size):

TypeError: object of type 'NoneType' has no len()
Laxmi

1 Answer

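The TypeError in the question comes from the relation-extraction head itself: as the traceback shows, LayoutLMv2ForRelationExtraction.forward also expects entities and relations arguments, which the processor does not produce, so relations stays None and len(relations) fails. The code below sidesteps that by using a token-classification checkpoint fine-tuned on FUNSD, which labels every word as question/answer/header/other directly from the processor output:
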
import numpy as np
from transformers import LayoutLMv2Processor, LayoutLMv2ForTokenClassification
from datasets import load_dataset
from PIL import Image, ImageDraw, ImageFont


processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
model = LayoutLMv2ForTokenClassification.from_pretrained("nielsr/layoutlmv2-finetuned-funsd")


dataset = load_dataset("nielsr/funsd", split="test")
labels = dataset.features['ner_tags'].feature.names
id2label = {idx: label for idx, label in enumerate(labels)}
label2color = {'question':'blue', 'answer':'green', 'header':'orange', 'other':'red'}


def unnormalize_box(bbox, width, height):
    # the processor normalizes boxes to a 0-1000 scale; map them back to pixel coordinates
    return [
        width * (bbox[0] / 1000),
        height * (bbox[1] / 1000),
        width * (bbox[2] / 1000),
        height * (bbox[3] / 1000),
    ]
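# e.g. unnormalize_box([250, 50, 500, 100], width=800, height=1000) -> [200.0, 50.0, 400.0, 100.0]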


def iob_to_label(label):
    # strip the IOB prefix ('B-' / 'I-'); the bare 'O' tag becomes 'other'
    label = label[2:]
    if not label:
        return 'other'
    return label
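# e.g. iob_to_label('B-HEADER') -> 'HEADER' (the caller lower-cases it); iob_to_label('O') -> 'other'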


image_path = "invoice.jpg"

# load the example image
image = Image.open(image_path).convert("RGB")
image  # displayed automatically in a notebook


def process_image(image):
    width, height = image.size
    # encode
    encoding = processor(image, truncation=True, return_offsets_mapping=True, return_tensors="pt")
    offset_mapping = encoding.pop('offset_mapping')
    # forward pass
    outputs = model(**encoding)
    # get predictions
    predictions = outputs.logits.argmax(-1).squeeze().tolist()
    token_boxes = encoding.bbox.squeeze().tolist()
    # only keep non-subword predictions
    is_subword = np.array(offset_mapping.squeeze().tolist())[:,0] != 0
    true_predictions = [id2label[pred] for idx, pred in enumerate(predictions) if not is_subword[idx]]
    true_boxes = [unnormalize_box(box, width, height) for idx, box in enumerate(token_boxes) if not is_subword[idx]]
    # draw predictions over the image
    draw = ImageDraw.Draw(image)
    font = ImageFont.load_default()
    for prediction, box in zip(true_predictions, true_boxes):
        predicted_label = iob_to_label(prediction).lower()
        draw.rectangle(box, outline=label2color[predicted_label])
        draw.text((box[0]+10, box[1]-10), text=predicted_label, fill=label2color[predicted_label], font=font)
    return image

process_image(image)
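
To turn the labelled spans into key: value pairs, one option is to group consecutive words that share a label and pair each question span with the answer span that follows it in reading order. This is a minimal sketch, not part of the original answer: it assumes a true_words list collected alongside true_predictions (for example by applying the same is_subword mask to processor.tokenizer.convert_ids_to_tokens(encoding.input_ids.squeeze().tolist())), and real invoices may need smarter pairing than simple token order.

def to_key_value_pairs(true_words, true_predictions):
    # group consecutive words that share the same (IOB-stripped) label into spans
    spans = []
    for word, prediction in zip(true_words, true_predictions):
        label = iob_to_label(prediction).lower()
        if spans and spans[-1][0] == label:
            spans[-1][1].append(word)
        else:
            spans.append([label, [word]])
    # pair each 'question' span with the next 'answer' span
    pairs = {}
    pending_key = None
    for label, span_words in spans:
        text = " ".join(span_words)
        if label == "question":
            pending_key = text
        elif label == "answer" and pending_key is not None:
            pairs[pending_key] = text
            pending_key = None
    return pairs

print(to_key_value_pairs(true_words, true_predictions))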
Abhishek Bisht
  • This is working for me to get question: answer values from the invoice; maybe you need to do some manipulation in the code to get exactly what you want. – Abhishek Bisht Aug 31 '22 at 17:00
  • Thanks for the code. Yes, I need to apply some logic to make the question the key and the answer the value, so I get the details as key-value pairs. – Laxmi Sep 01 '22 at 14:09
  • Yeah, this can be your starting point; hope this will be helpful. – Abhishek Bisht Sep 03 '22 at 07:32