from transformers import AutoModel, AutoTokenizer, pipeline
import gradio as gr
# Load the OCR model. The original GPU checkpoint is kept below for reference;
# the srimanth-d/GOT_CPU copy runs without a CUDA device.
# tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
# model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, device_map='cuda', low_cpu_mem_usage=True)
tokenizer = AutoTokenizer.from_pretrained('srimanth-d/GOT_CPU', trust_remote_code=True)
model = AutoModel.from_pretrained('srimanth-d/GOT_CPU', trust_remote_code=True, low_cpu_mem_usage=True, use_safetensors=True, pad_token_id=tokenizer.eos_token_id)
model = model.eval()
# Summarization model
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
# Question-Answering model (English)
pipe_qa_en = pipeline('question-answering', model="deepset/roberta-base-squad2")
# Question-Answering model (Arabic)
pipe_qa_ar = pipeline("question-answering", model="gp-tar4/QA_FineTuned")
# Translation model (English to Arabic)
pipe_to_arabic = pipeline("translation", model="Helsinki-NLP/opus-mt-en-ar")
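# Pipeline overview: an uploaded image is OCR'd by the GOT model; the extracted text
# is then either summarized (BART), or used as QA context (RoBERTa for English;
# for Arabic it is first translated with the Helsinki-NLP model, then answered
# by the fine-tuned Arabic QA model).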
def summarize(text):
    # Summarize the text; truncation guards against OCR output longer than BART's input limit
    summary = summarizer(text, max_length=200, min_length=30, do_sample=False, truncation=True)
    return summary[0]['summary_text']
def question_answering(question, context, language='english'):
    # Route the question to the Arabic or English QA pipeline
    QA_input = {'question': question, 'context': context}
    if language == 'arabic':
        return pipe_qa_ar(QA_input)['answer']
    return pipe_qa_en(QA_input)['answer']
def to_arabic(text, max_length=512):
    # Translate in chunks of at most max_length characters so each piece
    # stays within the translation model's input limit
    chunks = [text[i:i+max_length] for i in range(0, len(text), max_length)]
    translated_chunks = [pipe_to_arabic(chunk)[0]['translation_text'] for chunk in chunks]
    return ' '.join(translated_chunks)
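# Note: character-based chunking is only a rough proxy for the model's token limit
# and can split a word or sentence across chunks. A gentler (hypothetical) variant
# would split on sentence boundaries first, e.g.:
#   import re
#   sentences = re.split(r'(?<=[.!?])\s+', text)
# and then pack whole sentences into chunks of up to max_length characters.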
def process_image_and_text(image, text):
    # Run OCR on the uploaded image, then summarize the extracted text
    ocr_text = model.chat(tokenizer, image, ocr_type='ocr')
    summarized_text = summarize(ocr_text)
    return f"Input text: {text}\n\nProcessed OCR text: {summarized_text}"
def process_image_qa(language, image, question):
    # Run OCR, then answer the question. The Radio choices are capitalized
    # ('Arabic'/'English'), so normalize before comparing.
    ocr_text = model.chat(tokenizer, image, ocr_type='ocr')
    if language.lower() == 'arabic':
        translated_text = to_arabic(ocr_text)
        return question_answering(question, translated_text, language='arabic')
    return question_answering(question, ocr_text)
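# For Arabic questions the QA context is the OCR text machine-translated into Arabic,
# so answer quality depends on both OCR accuracy and translation quality.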
# Gradio interfaces
summarization_Interface = gr.Interface(
    fn=process_image_and_text,
    inputs=[gr.Image(type="filepath", label="Upload Image"), gr.Textbox(label="Input Text")],
    outputs=gr.Textbox(label="Output Text"),
    title="OCR & Summarization",
    description="Upload an image; the extracted OCR text is summarized. Any input text is echoed back alongside the summary."
)
qa_Interface = gr.Interface(
    fn=process_image_qa,
    inputs=[gr.Radio(['Arabic', 'English'], label='Select Language', value='Arabic'), gr.Image(type="filepath", label="Upload Image"), gr.Textbox(label="Input Question")],
    outputs=gr.Textbox(label="Answer Text"),
    title="OCR & Question Answering",
    description="Upload an image and ask a question in English or Arabic."
)
# Combine both interfaces into a tabbed interface
apps_interface = gr.TabbedInterface([summarization_Interface, qa_Interface], tab_names=["Summarization", "Question Answering"])
if __name__ == "__main__":
    apps_interface.launch()