# Updated code: invoice OCR + Gemini structuring service.
# Standard library
import asyncio
import hashlib
import io
import json
import logging
import os
import time
import unicodedata
from typing import List

# Third-party
import cachetools
import cv2
import google.generativeai as genai
import numpy as np
import psutil
import pytesseract
from dotenv import load_dotenv
from fastapi import FastAPI, File, UploadFile
from pdf2image import convert_from_bytes
from PIL import Image
from pypdf import PdfReader
app = FastAPI()

# Configure logging once at import time; handlers attach to the root logger.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
)
logger = logging.getLogger(__name__)
def log_memory_usage():
    """Return a human-readable string with the current process RSS in MB."""
    rss_bytes = psutil.Process().memory_info().rss
    return f"Memory usage: {rss_bytes / 1024 / 1024:.2f} MB"
def get_file_hash(file_bytes):
    """Return the hex MD5 digest of raw file bytes (used as a cache key)."""
    digest = hashlib.md5(file_bytes)
    return digest.hexdigest()
def get_text_hash(raw_text):
    """Return the hex MD5 digest of a text string, hashed as UTF-8 bytes."""
    encoded = raw_text.encode("utf-8")
    return hashlib.md5(encoded).hexdigest()
def get_poppler_path():
    """Return an explicit poppler binary directory, or None.

    Returning None tells pdf2image (convert_from_bytes) to resolve the
    poppler tools (pdftoppm/pdftocairo) from the system PATH, which is
    the behavior this service relies on.

    The original body imported `platform` and `shutil` but never used
    them before the unconditional return; the dead imports are removed.
    """
    return None
# NOTE(review): the original `def` line for this routine was lost in
# extraction — the try/except below referenced `raw_text`, `text_hash`,
# `start_time`, the module-level Gemini `model`, and
# `structured_data_cache` with no visible enclosing function. The
# signature and timer initialization are reconstructed; confirm the
# original name and parameters against callers before merging.
def structure_with_gemini(raw_text, text_hash):
    """Extract structured invoice fields from OCR text via Gemini.

    Args:
        raw_text: Full OCR text of one invoice document.
        text_hash: MD5 hash of ``raw_text`` (see ``get_text_hash``),
            used as the key into the module-level ``structured_data_cache``.

    Returns:
        The parsed invoice JSON as a dict on success, or
        ``{"error": "..."}`` on any failure (the caller embeds this in
        its per-file response rather than raising).
    """
    start_time = time.time()
    try:
        prompt = f"""
You are an intelligent invoice data extractor. Given raw text from an
invoice in any language and extract key business fields in the specified JSON
format. Support English. Handle synonyms (e.g., 'total' = 'net', 'tax' =
'GST'/'TDS'). The 'Products' field is dynamic and may contain multiple items, each
with 'qty', 'description', 'unit_price', and 'amount'. Detect the currency (e.g.,
USD, INR, EUR) from symbols ($, ₹, €) or text; default to USD if unclear. If a
field is missing, include it with an empty string ("") or appropriate default
(e.g., 0 for numbers).
Raw text:
{raw_text}
Output JSON:
{{
  "invoice": {{
    "invoice_number": "",
    "invoice_date": "YYYY-MM-DD",
    "due_date": "YYYY-MM-DD",
    "purchase_order_number": "",
    "vendor": {{
      "vendor_id": "",
      "name": "",
      "address": {{
        "line1": "",
        "line2": "",
        "city": "",
        "state": "",
        "postal_code": "",
        "country": ""
      }},
      "contact": {{
        "email": "",
        "phone": ""
      }},
      "tax_id": ""
    }},
    "buyer": {{
      "buyer_id": "",
      "name": "",
      "address": {{
        "line1": "",
        "line2": "",
        "city": "",
        "state": "",
        "postal_code": "",
        "country": ""
      }},
      "contact": {{
        "email": "",
        "phone": ""
      }},
      "tax_id": ""
    }},
    "items": [
      {{
        "item_id": "",
        "description": "",
        "quantity": 0,
        "unit_of_measure": "",
        "unit_price": 0,
        "total_price": 0,
        "tax_rate": 0,
        "tax_amount": 0,
        "discount": 0,
        "net_amount": 0
      }}
    ],
    "sub_total": 0,
    "tax_total": 0,
    "discount_total": 0,
    "total_amount": 0,
    "currency": ""
  }}
}}
"""
        # `model` is the module-level genai.GenerativeModel (configured
        # elsewhere in this file — not visible in this chunk).
        response = model.generate_content(prompt)
        llm_output = response.text
        # The model may wrap the JSON in prose or markdown fences; slice
        # out the outermost {...} object before parsing.
        json_start = llm_output.find("{")
        json_end = llm_output.rfind("}") + 1
        json_str = llm_output[json_start:json_end]
        structured_data = json.loads(json_str)
        structured_data_cache[text_hash] = structured_data
        # NOTE(review): "(unknown)" here looks like a garbled filename
        # placeholder from extraction — confirm against the original.
        logger.info(f"Gemini processing for (unknown), took {time.time() - start_time:.2f} seconds, {log_memory_usage()}")
        return structured_data
    except Exception as e:
        # Best-effort: callers expect an error dict, not an exception.
        logger.error(f"Gemini processing failed for (unknown): {str(e)}, {log_memory_usage()}")
        return {"error": f"Gemini processing failed: {str(e)}"}
@app.post("/ocr")
async def extract_and_structure(files: List[UploadFile] = File(...)):
output_json = {
"success": True,
"message": "",
"data": []
}
success_count = 0
fail_count = 0
ocr_start_time = time.time()
page_texts = []
for i, img in enumerate(images):
page_text = await process_pdf_page(img, i)
page_texts.append(page_text)
raw_text = "".join(page_texts)
logger.info(f"Total OCR for {file.filename}, took
{time.time() - ocr_start_time:.2f} seconds, text length: {len(raw_text)},
{log_memory_usage()}")
except Exception as e:
fail_count += 1
error_msg = f"OCR failed: {str(e)}"
if "poppler" in str(e).lower():
error_msg += ". Please ensure Poppler is installed and
accessible in PATH."
output_json["data"].append({
"filename": file.filename,
"structured_data": {"error": error_msg},
"error": error_msg
})
logger.error(f"OCR failed for {file.filename}: {str(e)},
{log_memory_usage()}")
continue
else: # JPG/JPEG/PNG
try:
ocr_start_time = time.time()
raw_text = await process_image(file_bytes, file.filename, 0)
logger.info(f"Image OCR for {file.filename}, took {time.time()
- ocr_start_time:.2f} seconds, text length: {len(raw_text)}, {log_memory_usage()}")
except Exception as e:
fail_count += 1
output_json["data"].append({
"filename": file.filename,
"structured_data": {"error": f"Image OCR failed:
{str(e)}"},
"error": f"Image OCR failed: {str(e)}"
})
logger.error(f"Image OCR failed for {file.filename}: {str(e)},
{log_memory_usage()}")
continue
# Normalize text
try:
normalize_start_time = time.time()
raw_text = unicodedata.normalize('NFKC', raw_text)
raw_text = raw_text.encode().decode('utf-8')
raw_text_cache[file_hash] = raw_text
logger.info(f"Text normalization for {file.filename}, took
{time.time() - normalize_start_time:.2f} seconds, text length: {len(raw_text)},
{log_memory_usage()}")
except Exception as e:
logger.warning(f"Text normalization failed for {file.filename}:
{str(e)}, {log_memory_usage()}")