from functools import lru_cache

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
@lru_cache(maxsize=1)
def _load_transliteration_model(model_checkpoint: str = "eunsour/en-ko-transliterator"):
    """Load and cache the en->ko transliteration tokenizer/model pair.

    Cached so that repeated `transliteration` calls do not re-download or
    re-instantiate the checkpoint on every invocation.
    """
    model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, src_lang="en", tgt_lang="ko")
    return tokenizer, model


def transliteration(word: str):
    """Transliterate an English word into Korean (Hangul).

    Parameters
    ----------
    word : str
        English text to transliterate; inputs longer than 48 tokens are
        truncated.

    Returns
    -------
    list[str]
        Decoded transliteration(s), one entry per input sequence —
        e.g. ['트랜스포머'] for "transformer".
    """
    tokenizer, model = _load_transliteration_model()
    # Tokenize as PyTorch tensors; truncation caps long inputs at 48 tokens.
    encoded_en = tokenizer(word, truncation=True, max_length=48, return_tensors="pt")
    generated_tokens = model.generate(**encoded_en)
    # batch_decode returns a list even for a single input.
    return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
if __name__ == "__main__":
    # Demo runs only when executed as a script, so merely importing this
    # module does not trigger a model download.
    print(transliteration("transformer"))
    # Expected output: ['트랜스포머']
# Model-card metadata (from the Hugging Face page):
# - Downloads last month: 20
# This model does not have enough activity to be deployed to the Inference API
# (serverless) yet. Increase its social visibility and check back later, or
# deploy it to Inference Endpoints (dedicated) instead.