import pandas as pd
from PIL import Image, ImageDraw
import gradio as gr
import torch
import easyocr
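
# 🧰 Dependencies (versions not pinned here): pip install easyocr gradio pandas pillow torch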

# 📥 Let's download some sample images to play with!
def download_sample_images():
    image_urls = {
        '20-Books.jpg': 'https://raw.githubusercontent.com/AaronCWacker/Yggdrasil/master/images/20-Books.jpg',
        'COVID.png': 'https://github.com/JaidedAI/EasyOCR/raw/master/examples/english.png',
        'chinese.jpg': 'https://github.com/JaidedAI/EasyOCR/raw/master/examples/chinese.jpg',
        'japanese.jpg': 'https://github.com/JaidedAI/EasyOCR/raw/master/examples/japanese.jpg',
        'Hindi.jpeg': 'https://i.imgur.com/mwQFd7G.jpeg'
    }
    for filename, url in image_urls.items():
        print(f'🛸 Beaming down image: {filename}')
        torch.hub.download_url_to_file(url, filename)

# 🖌️ Function to draw boxes around detected text (because we all love boxes)
def draw_boxes(image, bounds, color='yellow', width=2):
    draw = ImageDraw.Draw(image)
    for bound in bounds:
        # 🧙‍♂️ Connect the four corner points of each detected box and close the loop
        p0, p1, p2, p3 = bound[0]
        draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
    return image
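
# 📦 For reference, reader.readtext() returns one entry per detected text region,
# shaped roughly like this (illustrative values):
#   ([[x0, y0], [x1, y1], [x2, y2], [x3, y3]], 'detected text', 0.98)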

# 🔮 The core function that does the OCR wizardry
def inference(img_path, lang):
    # 🕵️‍♂️ Load detection/recognition models for the selected languages
    # (the first run per language set downloads the model weights)
    reader = easyocr.Reader(lang)
    bounds = reader.readtext(img_path)
    # 🖼️ Convert to RGB so images with an alpha channel (e.g. PNGs) can be saved as JPEG
    im = Image.open(img_path).convert('RGB')
    draw_boxes(im, bounds)
    result_path = 'result.jpg'
    im.save(result_path)
    # 📋 Keep only the recognized text and confidence columns for the table output
    df = pd.DataFrame(bounds, columns=['Bounding Box', 'Text', 'Confidence']).iloc[:, 1:]
    return [result_path, df]
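
# 💡 Optional optimization sketch (not wired into inference() above): constructing an
# easyocr.Reader reloads models on every request, so a small cache keyed by the
# selected languages would avoid that. The helper below is an illustrative assumption,
# not part of the original app.
_reader_cache = {}

def get_cached_reader(lang):
    key = tuple(sorted(lang))
    if key not in _reader_cache:
        _reader_cache[key] = easyocr.Reader(list(key))
    return _reader_cache[key]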

# 🚀 Time to set up the Gradio app!
def main():
    title = '🖼️ Image to Multilingual OCR 👁️ with Gradio'
    description = 'Multilingual OCR powered by EasyOCR that runs conveniently on any device. 🌐'

    examples = [
        ['20-Books.jpg', ['en']],
        ['COVID.png', ['en']],
        ['chinese.jpg', ['ch_sim', 'en']],
        ['japanese.jpg', ['ja', 'en']],
        ['Hindi.jpeg', ['hi', 'en']]
    ]

    css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
    choices = ["ch_sim", "ch_tra", "de", "en", "es", "ja", "hi", "ru"]

    with gr.Blocks(css=css) as demo:
        gr.Markdown(f"# {title}\n\n{description}")
        
        with gr.Row():
            with gr.Column():
                img_input = gr.Image(type='filepath', label='📥 Input Image')
                lang_input = gr.CheckboxGroup(choices, value=['en'], label='🗣️ Language(s)')
                submit_btn = gr.Button("Start OCR 🕵️‍♂️")
            with gr.Column():
                img_output = gr.Image(type='filepath', label='📤 Output Image')
                df_output = gr.Dataframe(headers=['Text', 'Confidence'])

        gr.Examples(
            examples=examples,
            inputs=[img_input, lang_input],
            outputs=[img_output, df_output],
            fn=inference,
            examples_per_page=5,
            cache_examples=False
        )

        submit_btn.click(fn=inference, inputs=[img_input, lang_input], outputs=[img_output, df_output])

    demo.launch(debug=True)

if __name__ == "__main__":
    download_sample_images()
    main()