Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -32,7 +32,7 @@ def caption_and_translate(img, min_len, max_len):
     raw_image = Image.open(img).convert('RGB')
     inputs_blip = processor_blip(raw_image, return_tensors="pt")
 
-    out_blip = model_blip.generate(**inputs_blip, min_length=min_len, max_length=max_len)
+    out_blip = model_blip.generate(**inputs_blip, min_length=70, max_length=1000)
     english_caption = processor_blip.decode(out_blip[0], skip_special_tokens=True)
 
     # Translate caption from English to Arabic
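
For context, a minimal sketch of the function this hunk edits, assuming a BLIP captioning checkpoint (e.g. Salesforce/blip-image-captioning-base) and an English-to-Arabic translation pipeline (e.g. Helsinki-NLP/opus-mt-en-ar); neither model name appears in this diff. Because the commit hard-codes min_length=70 and max_length=1000, the min_len/max_len parameters of the original signature become unused, so the sketch drops them.

# Hypothetical reconstruction; the actual checkpoints are not shown in the diff.
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration, pipeline

processor_blip = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model_blip = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
translator_en_ar = pipeline("translation", model="Helsinki-NLP/opus-mt-en-ar")

def caption_and_translate(img):
    raw_image = Image.open(img).convert('RGB')
    inputs_blip = processor_blip(raw_image, return_tensors="pt")

    # Length bounds are now fixed instead of coming from the removed sliders.
    out_blip = model_blip.generate(**inputs_blip, min_length=70, max_length=1000)
    english_caption = processor_blip.decode(out_blip[0], skip_special_tokens=True)

    # Translate caption from English to Arabic
    arabic_caption = translator_en_ar(english_caption)[0]['translation_text']
    return english_caption, arabic_caption

Note that forcing min_length=70 is aggressive for image captions and tends to push the decoder into repetitive text; if long captions are not the goal, lower fixed bounds (or keeping the sliders) may be preferable.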

@@ -47,9 +47,9 @@ def caption_and_translate(img, min_len, max_len):
 # Gradio interface with multiple outputs
 img_cap_en_ar = gr.Interface(
     fn=caption_and_translate,
-    inputs=[gr.Image(type='filepath', label='Image'),
-            gr.Slider(label='Minimum Length', minimum=1, maximum=500, value=30),
-            gr.Slider(label='Maximum Length', minimum=1, maximum=500, value=100)],
+    inputs=[gr.Image(type='filepath', label='Image')]
+    #gr.Slider(label='Minimum Length', minimum=1, maximum=500, value=30),
+    #gr.Slider(label='Maximum Length', minimum=1, maximum=500, value=100)],
     outputs=[gr.Textbox(label='English Caption'),
              gr.HTML(label='Arabic Caption')],
     title='Image Captioning | وصف الصورة',
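
As rendered above, the new inputs line has no trailing comma before outputs=, and the Space's "Runtime error" status is consistent with that: if the rendering is faithful to the commit, importing app.py raises a SyntaxError. A corrected sketch of what the commit appears to intend (the trailing comma and the comment placement are my additions, not part of the commit):

import gradio as gr

img_cap_en_ar = gr.Interface(
    fn=caption_and_translate,
    inputs=[gr.Image(type='filepath', label='Image')],  # note the trailing comma
    # gr.Slider(label='Minimum Length', minimum=1, maximum=500, value=30),
    # gr.Slider(label='Maximum Length', minimum=1, maximum=500, value=100),
    outputs=[gr.Textbox(label='English Caption'),
             gr.HTML(label='Arabic Caption')],
    title='Image Captioning | وصف الصورة',
)

Separately, with only one input component Gradio calls the handler with a single argument, so the original caption_and_translate(img, min_len, max_len) signature would raise a TypeError on submit unless min_len and max_len are removed or given defaults.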

@@ -88,7 +88,7 @@ text_recognition = gr.Interface(
     outputs=[gr.Textbox(label='Extracted text'), gr.HTML(label= 'Translateted of Extracted text ')], # Output is text
     title="Text Extraction and Translation | إستخراج النص وترجمتة",
     description="Upload an image then Submet to extract text and translate it to Arabic| قم برفع الصورة وأرسلها ليظهر لك النص من الصورة",
-    examples =[["
+    examples =[["image_0.png"], ["image_1.png"]]
 )
 
 # Load trocr model for handwritten text extraction
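
This hunk ends at the comment that loads the handwriting model. The diff does not show which checkpoint the app uses; below is a minimal sketch of the standard transformers TrOCR flow, with microsoft/trocr-base-handwritten as an assumed stand-in.

from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

# Assumed checkpoint; the actual one is not visible in this diff.
processor_trocr = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model_trocr = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

def extract_handwritten_text(img):
    image = Image.open(img).convert("RGB")
    pixel_values = processor_trocr(images=image, return_tensors="pt").pixel_values
    generated_ids = model_trocr.generate(pixel_values)
    # batch_decode returns one string per image in the batch
    return processor_trocr.batch_decode(generated_ids, skip_special_tokens=True)[0]

The new examples=[["image_0.png"], ["image_1.png"]] entries only work if those files are committed to the Space repository alongside app.py.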

@@ -120,7 +120,7 @@ handwritten_rec = gr.Interface(
              gr.HTML(label='Arabic Text')],
     title="Handwritten Text Extraction | | إستخراج النص المكتوب بخط اليد وترجمتة",
     description="Upload an image then Submet to extract text and translate it to Arabic| قم برفع الصورة وأرسلها ليظهر لك النص من الصورة",
-    examples =[["tx_image_1.png"]]
+    examples =[["tx_image_1.png"], ["tx_image_3.png"]]
 )
 
 # Combine all interfaces into a tabbed interface
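
For the closing comment, a sketch of how the three interfaces defined above are typically combined; the tab labels here are placeholders, not values taken from the diff.

# Continues from the interface objects defined earlier in app.py.
demo = gr.TabbedInterface(
    [img_cap_en_ar, text_recognition, handwritten_rec],
    ["Image Captioning", "Text Extraction", "Handwritten Text"],
)

if __name__ == "__main__":
    demo.launch()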