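"""Gradio app combining OCR with summarization and question answering.

An uploaded image is read with GOT-OCR2; the extracted text is then either
summarized (facebook/bart-large-cnn) or used as context for question
answering (deepset/roberta-base-squad2 for English; the text is translated
with Helsinki-NLP/opus-mt-en-ar before being passed to an Arabic QA model).
"""
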
from transformers import AutoModel, AutoTokenizer, pipeline
import gradio as gr

# OCR model: GOT-OCR2. The original GPU checkpoint is kept below for reference;
# the CPU mirror is loaded instead so the app runs without CUDA.
# tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
# model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, device_map='cuda', low_cpu_mem_usage=True)
tokenizer = AutoTokenizer.from_pretrained('srimanth-d/GOT_CPU', trust_remote_code=True)
model = AutoModel.from_pretrained(
    'srimanth-d/GOT_CPU',
    trust_remote_code=True,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    pad_token_id=tokenizer.eos_token_id,
)
model = model.eval()

# Summarization model
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

# Question-Answering model (English)
pipe_qa_en = pipeline('question-answering', model="deepset/roberta-base-squad2")

# Question-Answering model (Arabic)
pipe_qa_ar = pipeline("question-answering", model="gp-tar4/QA_FineTuned")

# Translation model (English to Arabic)
pipe_to_arabic = pipeline("translation", model="Helsinki-NLP/opus-mt-en-ar")


def summarize(text):
    # Summarize the OCR output. truncation=True keeps long documents within
    # BART's maximum input length instead of raising an error.
    summary = summarizer(text, max_length=200, min_length=30, do_sample=False, truncation=True)
    return summary[0]['summary_text']


def question_answering(question, context, language='english'):
    # Route the question to the QA model that matches the context language.
    QA_input = {'question': question, 'context': context}
    if language == 'arabic':
        return pipe_qa_ar(QA_input)['answer']
    return pipe_qa_en(QA_input)['answer']


def to_arabic(text, max_length=512):
    # The translation model cannot handle arbitrarily long inputs, so translate
    # in 512-character chunks (a rough character-based proxy for the token
    # limit) and rejoin the pieces.
    chunks = [text[i:i + max_length] for i in range(0, len(text), max_length)]
    translated_chunks = [pipe_to_arabic(chunk)[0]['translation_text'] for chunk in chunks]
    return ' '.join(translated_chunks)


def process_image_and_text(image, text):
    # GOT-OCR2 exposes OCR through model.chat(); ocr_type='ocr' returns plain text.
    ocr_text = model.chat(tokenizer, image, ocr_type='ocr')
    summarized_text = summarize(ocr_text)
    return f"Input text: {text}\n\nSummary of OCR text: {summarized_text}"


def process_image_qa(language, image, question):
    ocr_text = model.chat(tokenizer, image, ocr_type='ocr')
    # The radio button supplies 'Arabic'/'English', so compare case-insensitively.
    if language.lower() == 'arabic':
        # The Arabic QA model needs Arabic context, so translate the OCR text first.
        translated_text = to_arabic(ocr_text)
        return question_answering(question, translated_text, language='arabic')
    return question_answering(question, ocr_text)


# Gradio interfaces
summarization_Interface = gr.Interface(
    fn=process_image_and_text,
    inputs=[
        gr.Image(type="filepath", label="Upload Image"),
        gr.Textbox(label="Input Text"),
    ],
    outputs=gr.Textbox(label="Output Text"),
    title="OCR & Summarization",
    description="Upload an image; the text extracted by OCR is summarized. Any input text is echoed back alongside the summary."
)

qa_Interface = gr.Interface(
    fn=process_image_qa,
    inputs=[
        gr.Radio(['Arabic', 'English'], label='Select Language', value='Arabic'),
        gr.Image(type="filepath", label="Upload Image"),
        gr.Textbox(label="Input Question"),
    ],
    outputs=gr.Textbox(label="Answer Text"),
    title="OCR & Question Answering",
    description="Upload an image and ask a question in English or Arabic."
)

# Combine both interfaces into a tabbed interface
apps_interface = gr.TabbedInterface([summarization_Interface, qa_Interface], tab_names=["Summarization", "Question Answering"])


apps_interface.launch()
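# Note: OCR on CPU is slow. Calling apps_interface.queue() before launch() is a
# standard Gradio option for handling concurrent requests; for local testing,
# launch(share=True) would additionally expose a temporary public URL.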