import os
import torch
from transformers import AutoModelForVision2Seq, AutoProcessor
from PIL import Image
import gradio as gr

# Log in to the Hugging Face Hub (required: the Llama 3.2 base model is gated)
from huggingface_hub import login
token = os.environ.get('HUGGING_FACE_HUB_TOKEN')
if token:
    login(token=token)

def load_model():
    # The processor (tokenizer + image preprocessing) comes from the gated
    # base model; the fine-tuned weights are pulled from the Hub repo.
    base_model_path = "meta-llama/Llama-3.2-11B-Vision-Instruct"
    hub_model_path = "Aekanun/thai-handwriting-llm"

    processor = AutoProcessor.from_pretrained(base_model_path, token=token)
    model = AutoModelForVision2Seq.from_pretrained(hub_model_path, token=token)

    return model, processor

model, processor = load_model()
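# A minimal sketch (assumption, not part of the original file): an
# 11B-parameter model will not fit in fp32 on most GPUs, so a GPU Space
# would typically load it in half precision with automatic placement:
#
#   model = AutoModelForVision2Seq.from_pretrained(
#       "Aekanun/thai-handwriting-llm", token=token,
#       torch_dtype=torch.bfloat16, device_map="auto",
#   )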

def process_image(image):
    if image is None:
        return "กรุณาอัพโหลดรูปภาพ"  # "Please upload an image"

    # Defensive: gr.Image(type="pil") already returns a PIL image, but
    # normalize numpy arrays just in case.
    if not isinstance(image, Image.Image):
        image = Image.fromarray(image)

    # The image processor expects 3-channel input, so force RGB.
    if image.mode != "RGB":
        image = image.convert("RGB")

    prompt = "Transcribe the Thai handwritten text from the provided image.\nOnly return the transcription in Thai language."
    
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {"type": "image", "image": image}
            ],
        }
    ]
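    # The chat template renders this role/content list into the model's
    # prompt format; the "image" entry marks where the image tokens go.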

    # add_generation_prompt=True appends the assistant header so the model
    # answers rather than continuing the user turn.
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = processor(text=text, images=image, return_tensors="pt")
    
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=256,
            do_sample=False,
            pad_token_id=processor.tokenizer.pad_token_id
        )
    
    # Decode only the newly generated tokens so the prompt text is not
    # echoed back into the transcription.
    generated_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    transcription = processor.decode(generated_tokens, skip_special_tokens=True)
    return transcription.strip()
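# Quick local smoke test (hypothetical file path; bypasses the Gradio UI):
#
#   print(process_image(Image.open("sample_handwriting.png")))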

demo = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="Thai Handwriting OCR",
)
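# Optional (assumption, not in the original file): demo.queue() serializes
# requests so concurrent users do not hit model.generate() simultaneously.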

if __name__ == "__main__":
    demo.launch()