fix error
Files changed:
- README.md (+1 -1)
- app.py (+4 -8)
- requirements.txt (+4 -3)
README.md CHANGED

```diff
@@ -6,7 +6,7 @@ colorTo: green
 sdk: gradio
 app_file: app.py
 pinned: false
-sdk_version:
+sdk_version: 6.1.0
 ---
 
 # Configuration
```
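Spaces reads `sdk_version` from this front matter to choose which Gradio release to install; the removed line had no value after the colon, which is presumably the build error this commit fixes.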
app.py CHANGED

```diff
@@ -3,23 +3,19 @@ import os
 # build detectron2 from source
 # we can't build detectron2 in requirements.txt because it needs PyTorch installed first,
 # but requirements.txt will try to build wheels before installing any packages.
-os.system("pip install git+https://github.com/facebookresearch/detectron2.git")
+os.system("pip install git+https://github.com/facebookresearch/detectron2.git --no-build-isolation")
 
 import gradio as gr
 import numpy as np
 from datasets import load_dataset
-from PIL import
+from PIL import ImageDraw, ImageFont
 from transformers import LayoutLMv2ForTokenClassification, LayoutLMv2Processor
 
 processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
 model = LayoutLMv2ForTokenClassification.from_pretrained("nielsr/layoutlmv2-finetuned-funsd")
 
-# load image example
-dataset = load_dataset("nielsr/funsd", split="test", trust_remote_code=True)
-image = Image.open(dataset[0]["image"]).convert("RGB")
-image = Image.open("./invoice.png")
-image.save("document.png")
 # define id2label, label2color
+dataset = load_dataset("nielsr/funsd", split="test")
 labels = dataset.features["ner_tags"].feature.names
 id2label = {v: k for v, k in enumerate(labels)}
 label2color = {"question": "blue", "answer": "green", "header": "orange", "other": "violet"}
```
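The `--no-build-isolation` flag is the substance of this hunk: pip normally builds source packages like detectron2 in an isolated environment that cannot see the already-installed torch, which is exactly the failure mode the comment above describes. A minimal sketch of a guarded variant of this startup install, using only the standard library (the Space itself runs `os.system` unconditionally; the guard is an assumption, not the Space's code):

```python
import importlib.util
import os

# Skip the slow source build when detectron2 is already importable,
# e.g. after a warm restart of the Space.
if importlib.util.find_spec("detectron2") is None:
    # --no-build-isolation lets detectron2's setup.py import the torch
    # that requirements.txt already installed, instead of failing inside
    # an empty isolated build environment.
    os.system(
        "pip install git+https://github.com/facebookresearch/detectron2.git "
        "--no-build-isolation"
    )
```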
```diff
@@ -74,7 +70,7 @@ def process_image(image):
 title = "Interactive demo: LayoutLMv2"
 description = "Demo for Microsoft's LayoutLMv2, a Transformer for state-of-the-art document image understanding tasks. This particular model is fine-tuned on FUNSD, a dataset of manually annotated forms. It annotates the words appearing in the image as QUESTION/ANSWER/HEADER/OTHER. To use it, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. If you want to make the output bigger, right-click on it and select 'Open image in new tab'."
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2012.14740' target='_blank'>LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>Github Repo</a></p>"
-examples = [["
+examples = [["invoice.png"]]
 
 css = ".output-image, .input-image {height: 40rem !important; width: 100% !important;}"
 # css = "@media screen and (max-width: 600px) { .output_image, .input_image {height:20rem !important; width: 100% !important;} }"
```
requirements.txt CHANGED

```diff
@@ -2,7 +2,8 @@
 --extra-index-url https://pypi.org/simple
 Pillow
 numpy
-torch
-torchvision
-transformers
+torch==2.9.1
+torchvision==0.24.1
+transformers==4.57.3
+datasets==4.4.1
 pytesseract
```
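The pins replace previously floating versions: torchvision 0.24.x is the release built against torch 2.9.x, so pinning the pair keeps pip from resolving a mismatched combination, and a fixed torch version also keeps the detectron2 source build above reproducible. `datasets` is newly listed, matching the `load_dataset` call in app.py; `pytesseract` stays unpinned and additionally needs the system `tesseract-ocr` binary, which a Space typically gets from a separate `packages.txt`.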