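"""Gradio demo: Hindi text-to-image.

A Hindi prompt is first translated to English with a seq2seq model, and the
translated prompt is then passed to a latent diffusion pipeline that
generates the images shown in the gallery.
"""
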
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from diffusers import DiffusionPipeline

# Custom CSS: fixed heights for the output images and underlined links
css = ".output-image{height: 528px !important} .output-carousel .output-image{height:272px !important} a{text-decoration: underline}"

def translate(hindi_sentence):
    # Translate the Hindi prompt into English
    inputs = tokenizer.encode(
        hindi_sentence, return_tensors="pt", padding=True, max_length=512, truncation=True)
    outputs = model.generate(
        inputs, max_length=128, num_beams=4, early_stopping=True)
    translated = tokenizer.decode(outputs[0], skip_special_tokens=True).strip().lower()

    # Generate images from the translated prompt with the latent diffusion pipeline
    images = ldm([translated], num_inference_steps=50, eta=0.3, guidance_scale=6).images
    return images


# Load the Hindi-to-English translation model once at startup
tokenizer = AutoTokenizer.from_pretrained("salesken/translation-hi-en")
model = AutoModelForSeq2SeqLM.from_pretrained("salesken/translation-hi-en")

# Load the latent diffusion text-to-image pipeline once at startup,
# rather than on every request
model_id = "CompVis/ldm-text2im-large-256"
ldm = DiffusionPipeline.from_pretrained(model_id)
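
# Optional (assumes a CUDA-capable GPU and a CUDA build of torch, which the
# original script does not require): move the diffusion pipeline to the GPU
# to speed up image generation.
import torch
if torch.cuda.is_available():
    ldm = ldm.to("cuda")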


# Example Hindi prompt: "a train running on water"
exp = [["पानी पे चलती रेलगाड़ी"]]

iface = gr.Interface(fn=translate, inputs="text", outputs=gr.Gallery(label="Your result"),
                     examples=exp, css=css)
iface.launch()