import gradio as gr
import json
import tempfile
import os
import zipfile
import shutil
from typing import List, Optional, Literal, Tuple, Union, Dict
from PIL import Image
import requests
import spaces
from pathlib import Path
from visualizer import htrflow_visualizer
from htrflow.volume.volume import Collection
from htrflow.pipeline.pipeline import Pipeline
DEFAULT_OUTPUT = "alto"
FORMAT_CHOICES = [
"letter_english",
"letter_swedish",
"spread_english",
"spread_swedish",
]
FILE_CHOICES = ["txt", "alto", "page", "json"]
FormatChoices = Literal[
"letter_english", "letter_swedish", "spread_english", "spread_swedish"
]
FileChoices = Literal["txt", "alto", "page", "json"]
PIPELINE_CONFIGS = {
"letter_english": {
"steps": [
{
"step": "Segmentation",
"settings": {
"model": "yolo",
"model_settings": {
"model": "Riksarkivet/yolov9-lines-within-regions-1"
},
"generation_settings": {"batch_size": 8},
},
},
{
"step": "TextRecognition",
"settings": {
"model": "TrOCR",
"model_settings": {"model": "microsoft/trocr-base-handwritten"},
"generation_settings": {"batch_size": 16},
},
},
{"step": "OrderLines"},
]
},
"letter_swedish": {
"steps": [
{
"step": "Segmentation",
"settings": {
"model": "yolo",
"model_settings": {
"model": "Riksarkivet/yolov9-lines-within-regions-1"
},
"generation_settings": {"batch_size": 8},
},
},
{
"step": "TextRecognition",
"settings": {
"model": "TrOCR",
"model_settings": {
"model": "Riksarkivet/trocr-base-handwritten-hist-swe-2"
},
"generation_settings": {"batch_size": 16},
},
},
{"step": "OrderLines"},
]
},
"spread_english": {
"steps": [
{
"step": "Segmentation",
"settings": {
"model": "yolo",
"model_settings": {"model": "Riksarkivet/yolov9-regions-1"},
"generation_settings": {"batch_size": 4},
},
},
{
"step": "Segmentation",
"settings": {
"model": "yolo",
"model_settings": {
"model": "Riksarkivet/yolov9-lines-within-regions-1"
},
"generation_settings": {"batch_size": 8},
},
},
{
"step": "TextRecognition",
"settings": {
"model": "TrOCR",
"model_settings": {"model": "microsoft/trocr-base-handwritten"},
"generation_settings": {"batch_size": 16},
},
},
{"step": "ReadingOrderMarginalia", "settings": {"two_page": True}},
]
},
"spread_swedish": {
"steps": [
{
"step": "Segmentation",
"settings": {
"model": "yolo",
"model_settings": {"model": "Riksarkivet/yolov9-regions-1"},
"generation_settings": {"batch_size": 4},
},
},
{
"step": "Segmentation",
"settings": {
"model": "yolo",
"model_settings": {
"model": "Riksarkivet/yolov9-lines-within-regions-1"
},
"generation_settings": {"batch_size": 8},
},
},
{
"step": "TextRecognition",
"settings": {
"model": "TrOCR",
"model_settings": {
"model": "Riksarkivet/trocr-base-handwritten-hist-swe-2"
},
"generation_settings": {"batch_size": 16},
},
},
{"step": "ReadingOrderMarginalia", "settings": {"two_page": True}},
]
},
}
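# Illustrative only (not used by the app): the optional `custom_settings` accepted by the
# batch tools below is parsed with json.loads and passed straight to Pipeline.from_config,
# so it must mirror the structure of the entries above. The model names here are copied
# from the built-in configs; the batch sizes are arbitrary example values.
EXAMPLE_CUSTOM_SETTINGS = json.dumps(
    {
        "steps": [
            {
                "step": "Segmentation",
                "settings": {
                    "model": "yolo",
                    "model_settings": {
                        "model": "Riksarkivet/yolov9-lines-within-regions-1"
                    },
                    "generation_settings": {"batch_size": 8},
                },
            },
            {
                "step": "TextRecognition",
                "settings": {
                    "model": "TrOCR",
                    "model_settings": {"model": "microsoft/trocr-base-handwritten"},
                    "generation_settings": {"batch_size": 16},
                },
            },
            {"step": "OrderLines"},
        ]
    }
)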
def handle_image_input(image_path: Optional[str], progress: Optional[gr.Progress] = None, desc_prefix: str = "") -> str:
"""
Handle image input from various sources (local file, URL, or uploaded file).
Args:
image_path: Path to image file or URL
progress: Progress tracker for UI updates
desc_prefix: Prefix for progress descriptions
Returns:
Local file path to the image
"""
if not image_path:
raise ValueError("No image provided. Please upload an image or provide a URL.")
if progress:
progress(0.1, desc=f"{desc_prefix}Processing image input...")
# If it's a URL, download the image
if isinstance(image_path, str) and (image_path.startswith("http://") or image_path.startswith("https://")):
try:
if progress:
progress(0.2, desc=f"{desc_prefix}Downloading image from URL...")
response = requests.get(image_path, timeout=30)
response.raise_for_status()
# Save to temporary file
with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmp_file:
tmp_file.write(response.content)
image_path = tmp_file.name
# Verify it's a valid image
try:
img = Image.open(image_path)
img.verify()
except Exception as e:
os.unlink(image_path)
raise ValueError(f"Downloaded file is not a valid image: {str(e)}")
except requests.RequestException as e:
raise ValueError(f"Failed to download image from URL: {str(e)}")
# Verify the file exists
if not os.path.exists(image_path):
raise ValueError(f"Image file not found: {image_path}")
return image_path
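# Usage sketch (illustrative paths/URLs only, not executed by the app):
#   handle_image_input("https://example.com/scan.jpg")  # downloads to a temp .jpg and returns its path
#   handle_image_input("/tmp/uploads/scan.jpg")         # returns the existing local path unchanged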
def parse_image_input(image_input: Union[str, List[str], None]) -> List[str]:
"""
Parse image input which can be a single path, multiple paths, or URLs separated by newlines.
Args:
image_input: Single image path, list of paths, or newline-separated URLs/paths
Returns:
List of image paths/URLs
"""
if not image_input:
return []
if isinstance(image_input, list):
return image_input
if isinstance(image_input, str):
# Check if it's multiple URLs/paths separated by newlines
lines = image_input.strip().split('\n')
paths = []
for line in lines:
line = line.strip()
if line: # Skip empty lines
paths.append(line)
return paths if paths else [image_input]
return []
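# Accepted input shapes (illustrative, not executed by the app):
#   parse_image_input("https://a.jpg\nhttps://b.jpg")  -> ["https://a.jpg", "https://b.jpg"]
#   parse_image_input(["/tmp/a.jpg", "/tmp/b.jpg"])    -> ["/tmp/a.jpg", "/tmp/b.jpg"]
#   parse_image_input(None)                            -> []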
@spaces.GPU
def _process_htr_pipeline_batch(
image_paths: List[str],
document_type: FormatChoices,
custom_settings: Optional[str] = None,
    progress: Optional[gr.Progress] = None,
) -> Dict[str, Union[Collection, str]]:
    """Run the HTR pipeline over multiple images; returns a mapping of image name to a processed Collection, or an error string on failure."""
results = {}
temp_files = []
total_images = len(image_paths)
if custom_settings:
try:
config = json.loads(custom_settings)
except json.JSONDecodeError:
raise ValueError("Invalid JSON in custom_settings parameter. Please check your JSON syntax.")
else:
config = PIPELINE_CONFIGS[document_type]
# Initialize pipeline once for all images
pipeline = Pipeline.from_config(config)
    for idx, image_path in enumerate(image_paths):
        # Derive a stable display name up front so the error handler below can always reference it
        image_name = Path(image_path).stem if not str(image_path).startswith("http") else f"image_{idx+1}"
        try:
if progress:
progress((idx + 0.2) / total_images,
desc=f"Processing image {idx+1}/{total_images}: {image_name}")
# Handle image input
processed_path = handle_image_input(image_path, progress,
desc_prefix=f"[{idx+1}/{total_images}] ")
# Track temp files for cleanup
if processed_path.startswith(tempfile.gettempdir()):
temp_files.append(processed_path)
if progress:
progress((idx + 0.5) / total_images,
desc=f"Running HTR on image {idx+1}/{total_images}: {image_name}")
# Process with pipeline
collection = Collection([processed_path])
processed_collection = pipeline.run(collection)
results[image_name] = processed_collection
if progress:
progress((idx + 0.9) / total_images,
desc=f"Completed image {idx+1}/{total_images}: {image_name}")
except Exception as e:
results[image_name] = f"Error: {str(e)}"
print(f"Error processing {image_path}: {str(e)}")
# Cleanup temp files
for temp_file in temp_files:
try:
os.unlink(temp_file)
        except OSError:
            pass
if progress:
progress(1.0, desc=f"Completed processing {total_images} images!")
return results
def htr_text_batch(
image_input: Union[str, List[str]],
document_type: FormatChoices = "letter_swedish",
custom_settings: Optional[str] = None,
return_format: str = "separate", # "separate" or "combined"
progress: gr.Progress = gr.Progress()
) -> str:
"""
Extract text from multiple handwritten documents using HTR.
This tool processes multiple historical handwritten documents and extracts text content from each.
You can provide multiple image paths/URLs separated by newlines, or upload multiple files.
Args:
image_input: Single image path/URL, multiple paths/URLs (newline-separated), or list of uploaded files
document_type: Type of document layout - choose based on your documents' structure and language
custom_settings: Optional JSON configuration for advanced pipeline customization
return_format: "separate" to show each document's text separately, "combined" to merge all text
Returns:
Extracted text from all handwritten documents
"""
try:
progress(0, desc="Starting batch HTR text extraction...")
# Parse input to get list of images
image_paths = parse_image_input(image_input)
if not image_paths:
return "No images provided. Please upload images or provide URLs."
progress(0.1, desc=f"Processing {len(image_paths)} images...")
# Process all images
results = _process_htr_pipeline_batch(
image_paths, document_type, custom_settings, progress
)
# Extract text from results
all_texts = []
for image_name, collection in results.items():
if isinstance(collection, str): # Error case
all_texts.append(f"=== {image_name} ===\n{collection}\n")
else:
text = extract_text_from_collection(collection)
if return_format == "separate":
all_texts.append(f"=== {image_name} ===\n{text}\n")
else:
all_texts.append(text)
if return_format == "separate":
return "\n".join(all_texts)
else:
return "\n\n".join(all_texts)
except ValueError as e:
return f"Input error: {str(e)}"
except Exception as e:
return f"Batch HTR text extraction failed: {str(e)}"
def htrflow_file_batch(
image_input: Union[str, List[str]],
document_type: FormatChoices = "letter_swedish",
output_format: FileChoices = DEFAULT_OUTPUT,
custom_settings: Optional[str] = None,
server_name: str = "https://gabriel-htrflow-mcp.hf.space",
progress: gr.Progress = gr.Progress()
) -> str:
"""
Process multiple handwritten documents and generate formatted output files.
This tool performs HTR on multiple documents and exports the results in various formats.
Returns a ZIP file containing all processed documents.
Args:
image_input: Single image path/URL, multiple paths/URLs (newline-separated), or list of uploaded files
document_type: Type of document layout - affects segmentation and reading order
output_format: Desired output format (txt for plain text, alto/page for XML with coordinates, json for structured data)
custom_settings: Optional JSON configuration for advanced pipeline customization
server_name: Base URL of the server (used for generating download links)
Returns:
Path to ZIP file containing all generated files
"""
try:
progress(0, desc="Starting batch HTR file processing...")
# Parse input to get list of images
image_paths = parse_image_input(image_input)
if not image_paths:
error_file = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt')
error_file.write("No images provided. Please upload images or provide URLs.")
error_file.close()
return error_file.name
progress(0.1, desc=f"Processing {len(image_paths)} images...")
# Process all images
results = _process_htr_pipeline_batch(
image_paths, document_type, custom_settings, progress
)
progress(0.9, desc="Creating ZIP archive...")
# Create temporary directory for output files
temp_dir = Path(tempfile.mkdtemp())
output_files = []
for image_name, collection in results.items():
if isinstance(collection, str): # Error case
# Write error to text file
error_file_path = temp_dir / f"{image_name}_error.txt"
with open(error_file_path, 'w') as f:
f.write(collection)
output_files.append(error_file_path)
else:
# Save collection in requested format
export_dir = temp_dir / image_name
collection.save(directory=str(export_dir), serializer=output_format)
# Find and rename the generated file
for root, _, files in os.walk(export_dir):
for file in files:
old_path = Path(root) / file
file_ext = Path(file).suffix
new_filename = (
f"{image_name}.{output_format}"
if not file_ext
else f"{image_name}{file_ext}"
)
new_path = temp_dir / new_filename
shutil.move(str(old_path), str(new_path))
output_files.append(new_path)
break
# Create ZIP file
zip_path = temp_dir / f"htr_batch_{output_format}.zip"
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
for file_path in output_files:
zipf.write(file_path, file_path.name)
progress(1.0, desc=f"Batch processing complete! Processed {len(image_paths)} images.")
return str(zip_path)
except ValueError as e:
error_file = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt')
error_file.write(f"Input error: {str(e)}")
error_file.close()
return error_file.name
except Exception as e:
error_file = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt')
error_file.write(f"Batch HTR file generation failed: {str(e)}")
error_file.close()
return error_file.name
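# Resulting archive layout (illustrative): htr_batch_<format>.zip containing one entry per
# input image, e.g. "letter_001.xml" for successful pages and "image_2_error.txt" for failures.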
def htrflow_visualizer_batch(
image_input: Union[str, List[str]],
htr_documents: Union[str, List[str]],
server_name: str = "https://gabriel-htrflow-mcp.hf.space",
progress: gr.Progress = gr.Progress()
) -> str:
"""
Create visualizations for multiple HTR results overlaid on original documents.
This tool generates annotated images showing detected text regions and recognized text
for multiple documents. Returns a ZIP file containing all visualization images.
Args:
image_input: Original document image paths/URLs (newline-separated if string)
htr_documents: HTR output files (ALTO/PAGE XML) - must match order of images
server_name: Base URL of the server (used for generating download links)
Returns:
Path to ZIP file containing all visualization images
"""
try:
progress(0, desc="Starting batch visualization generation...")
# Parse inputs
image_paths = parse_image_input(image_input)
htr_paths = parse_image_input(htr_documents) if isinstance(htr_documents, str) else htr_documents
if not image_paths:
raise ValueError("No images provided")
if not htr_paths:
raise ValueError("No HTR documents provided")
if len(image_paths) != len(htr_paths):
raise ValueError(f"Number of images ({len(image_paths)}) doesn't match number of HTR documents ({len(htr_paths)})")
progress(0.1, desc=f"Creating visualizations for {len(image_paths)} documents...")
temp_dir = Path(tempfile.mkdtemp())
output_files = []
temp_files = []
        for idx, (image_path, htr_path) in enumerate(zip(image_paths, htr_paths)):
            # Derive a stable display name up front so the error handler below can always reference it
            image_name = Path(image_path).stem if not str(image_path).startswith("http") else f"image_{idx+1}"
            try:
progress((idx + 0.3) / len(image_paths),
desc=f"Visualizing document {idx+1}/{len(image_paths)}: {image_name}")
# Handle image input
processed_image = handle_image_input(image_path, progress,
desc_prefix=f"[{idx+1}/{len(image_paths)}] ")
if processed_image.startswith(tempfile.gettempdir()):
temp_files.append(processed_image)
# Generate visualization
viz_result = htrflow_visualizer(processed_image, htr_path, server_name)
if viz_result and os.path.exists(viz_result):
# Move to temp dir with proper name
viz_path = temp_dir / f"{image_name}_visualization.png"
shutil.move(viz_result, str(viz_path))
output_files.append(viz_path)
except Exception as e:
# Create error file for this visualization
error_path = temp_dir / f"{image_name}_viz_error.txt"
with open(error_path, 'w') as f:
f.write(f"Visualization failed: {str(e)}")
output_files.append(error_path)
# Cleanup temp files
for temp_file in temp_files:
try:
os.unlink(temp_file)
            except OSError:
                pass
progress(0.9, desc="Creating ZIP archive...")
# Create ZIP file
zip_path = temp_dir / "htr_visualizations.zip"
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
for file_path in output_files:
zipf.write(file_path, file_path.name)
progress(1.0, desc=f"Visualization complete! Created {len(output_files)} visualizations.")
return str(zip_path)
except Exception as e:
error_file = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt')
error_file.write(f"Batch visualization failed: {str(e)}")
error_file.close()
return error_file.name
def extract_text_from_collection(collection: Collection) -> str:
"""Extract and combine text from all nodes in the collection."""
text_lines = []
for page in collection.pages:
for node in page.traverse():
if hasattr(node, "text") and node.text:
text_lines.append(node.text)
return "\n".join(text_lines)
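# Minimal sketch of the core HTRflow calls used above, outside the Gradio/batch wrapper.
# It only reuses functions already exercised in this file (Pipeline.from_config, Collection,
# Pipeline.run, extract_text_from_collection); the image path is a placeholder, and in this
# Space GPU work normally goes through the @spaces.GPU-decorated batch function instead.
def example_single_image_htr(image_path: str, document_type: FormatChoices = "letter_swedish") -> str:
    pipeline = Pipeline.from_config(PIPELINE_CONFIGS[document_type])
    processed = pipeline.run(Collection([image_path]))
    return extract_text_from_collection(processed)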
def create_htrflow_mcp_server():
# Batch HTR Text extraction interface
htr_text_batch_interface = gr.Interface(
fn=htr_text_batch,
inputs=[
gr.Textbox(
label="Image Paths/URLs (one per line) or Upload Files",
placeholder="https://example.com/image1.jpg\nhttps://example.com/image2.jpg\n\nOR drag and drop multiple files",
lines=5
),
gr.Dropdown(
choices=FORMAT_CHOICES,
value="letter_swedish",
label="Document Type",
info="Select the type that best matches your documents' layout and language"
),
gr.Textbox(
label="Custom Settings (JSON)",
placeholder='{"steps": [...]} - Leave empty for default settings',
value="",
lines=3
),
gr.Radio(
choices=["separate", "combined"],
value="separate",
label="Output Format",
info="'separate' shows each document's text with headers, 'combined' merges all text"
),
],
outputs=[gr.Textbox(label="Extracted Text", lines=20)],
title="Batch Extract Text from Handwritten Documents",
description="Process multiple handwritten document images at once. Upload files or provide URLs (one per line)",
api_name="htr_text_batch",
api_description="Extract text from multiple handwritten historical documents using advanced HTR models. Supports batch processing of letters and book spreads in English and Swedish. If a user passes a file as an input, use the upload_file_to_gradio tool, if present, to upload the file to the gradio app and create a Gradio File Input. Then use the returned path as the input to the tool",
)
# Batch HTR File generation interface
htrflow_file_batch_interface = gr.Interface(
fn=htrflow_file_batch,
inputs=[
gr.Textbox(
label="Image Paths/URLs (one per line) or Upload Files",
placeholder="https://example.com/image1.jpg\nhttps://example.com/image2.jpg\n\nOR drag and drop multiple files",
lines=5
),
gr.Dropdown(
choices=FORMAT_CHOICES,
value="letter_swedish",
label="Document Type",
info="Select the type that best matches your documents' layout and language"
),
gr.Dropdown(
choices=FILE_CHOICES,
value=DEFAULT_OUTPUT,
label="Output Format",
info="ALTO/PAGE: XML with coordinates | JSON: Structured data | TXT: Plain text only"
),
gr.Textbox(
label="Custom Settings (JSON)",
placeholder='{"steps": [...]} - Leave empty for default settings',
value="",
lines=3
),
gr.Textbox(
label="Server Name",
value="https://gabriel-htrflow-mcp.hf.space",
placeholder="Server URL for download links",
visible=False # Hide this from UI but keep for API
),
],
outputs=[gr.File(label="Download ZIP with HTR Output Files")],
title="Batch Generate HTR Output Files",
description="Process multiple handwritten documents and export in various formats. Returns a ZIP file with all results.",
api_name="htrflow_file_batch",
api_description="Process multiple handwritten documents and generate formatted output files. Returns a ZIP containing outputs in ALTO XML (with text coordinates), PAGE XML, JSON (structured data), or plain text format. If a user passes a file as an input, use the upload_file_to_gradio tool, if present, to upload the file to the gradio app and create a Gradio File Input. Then use the returned path as the input to the tool",
)
# Batch HTR Visualization interface
htrflow_viz_batch_interface = gr.Interface(
fn=htrflow_visualizer_batch,
inputs=[
gr.Textbox(
label="Original Image Paths/URLs (one per line)",
placeholder="https://example.com/image1.jpg\nhttps://example.com/image2.jpg",
lines=5
),
gr.File(
label="Upload HTR XML Files (ALTO/PAGE)",
file_types=[".xml"],
file_count="multiple"
),
gr.Textbox(
label="Server Name",
value="https://gabriel-htrflow-mcp.hf.space",
placeholder="Server URL for download links",
visible=False # Hide this from UI but keep for API
),
],
outputs=gr.File(label="Download ZIP with Visualization Images"),
title="Batch Visualize HTR Results",
description="Create annotated images for multiple documents. Images and XML files must be in matching order.",
api_name="htrflow_visualizer_batch",
api_description="Generate visualization images showing HTR results overlaid on multiple original documents. Shows detected text regions, reading order, and recognized text for quality control. Returns a ZIP file with all visualizations. If a user passes a file as an input, use the upload_file_to_gradio tool, if present, to upload the file to the gradio app and create a Gradio File Input. Then use the returned path as the input to the tool",
)
# Legacy single-file interfaces (kept for backward compatibility)
htr_text_interface = gr.Interface(
fn=lambda img, doc_type, settings: htr_text_batch(img, doc_type, settings, "separate"),
inputs=[
gr.Image(type="filepath", label="Upload Image or Enter URL"),
gr.Dropdown(
choices=FORMAT_CHOICES,
value="letter_swedish",
label="Document Type"
),
gr.Textbox(
label="Custom Settings (JSON)",
placeholder='{"steps": [...]}',
value="",
lines=3
),
],
outputs=[gr.Textbox(label="Extracted Text", lines=15)],
title="Extract Text (Single Document)",
description="Upload a single handwritten document image to extract text",
api_name="htr_text",
api_description="Extract text from handwritten historical documents using advanced HTR models. Supports letters and book spreads in English and Swedish. If a user passes a file as an input, use the upload_file_to_gradio tool, if present, to upload the file to the gradio app and create a Gradio File Input. Then use the returned path as the input to the tool",
)
htrflow_file_interface = gr.Interface(
fn=lambda img, doc_type, fmt, settings, srv: htrflow_file_batch([img], doc_type, fmt, settings, srv),
inputs=[
gr.Image(type="filepath", label="Upload Image or Enter URL"),
gr.Dropdown(
choices=FORMAT_CHOICES,
value="letter_swedish",
label="Document Type"
),
gr.Dropdown(
choices=FILE_CHOICES,
value=DEFAULT_OUTPUT,
label="Output Format"
),
gr.Textbox(
label="Custom Settings (JSON)",
value="",
lines=3
),
gr.Textbox(
label="Server Name",
value="https://gabriel-htrflow-mcp.hf.space",
visible=False
),
],
outputs=[gr.File(label="Download HTR Output File")],
title="Generate File (Single Document)",
description="Process a single handwritten document and export in various formats",
api_name="htrflow_file",
api_description="Process handwritten documents and generate formatted output files. Outputs can be in ALTO XML (with text coordinates), PAGE XML, JSON (structured data), or plain text format. If a user passes a file as an input, use the upload_file_to_gradio tool, if present, to upload the file to the gradio app and create a Gradio File Input. Then use the returned path as the input to the tool",
)
# Create tabbed interface with better organization
demo = gr.TabbedInterface(
[
htr_text_batch_interface,
htrflow_file_batch_interface,
htrflow_viz_batch_interface,
htr_text_interface,
htrflow_file_interface,
],
        [
            "📚 Batch Text Extract",
            "📁 Batch File Generate",
            "🖼️ Batch Visualize",
            "📄 Single Text",
            "💾 Single File",
        ],
        title="🖋️ HTRflow - Handwritten Text Recognition (Batch & Single Processing)",
analytics_enabled=False,
)
return demo
if __name__ == "__main__":
demo = create_htrflow_mcp_server()
demo.launch(
mcp_server=True,
share=False,
debug=False,
show_api=True,
favicon_path=None,
)
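# Client-side usage sketch (assumptions: the Space is reachable at the default server_name
# above and gradio_client is installed; positional arguments follow the input order of the
# corresponding gr.Interface defined in create_htrflow_mcp_server):
#
#   from gradio_client import Client
#   client = Client("https://gabriel-htrflow-mcp.hf.space")
#   text = client.predict(
#       "https://example.com/page1.jpg\nhttps://example.com/page2.jpg",  # image URLs, one per line
#       "letter_swedish",   # document type
#       "",                 # custom settings (empty = built-in defaults)
#       "separate",         # return format
#       api_name="/htr_text_batch",
#   )
#   print(text)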