Malayalam Transliteration
Sequence-to-Sequence Model for Transliterating Romanised Malayalam (Manglish) to the Native Script.
The model requires user-defined tokenizers for the source and target scripts. It is trained on individual words; if your use case involves transliterating full sentences, split the sentences into words before passing them to the model.
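The snippet below loads the model from the Hugging Face Hub, rebuilds the character-level tokenizers for the source (Latin) and target (Malayalam) scripts, and defines a helper that transliterates word by word while leaving all other characters untouched.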
import re
import numpy as np
from huggingface_hub import from_pretrained_keras
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
model = from_pretrained_keras("vrclc/transliteration")
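Loading returns a plain Keras model; if you want to check its layer names (for example the "encoder_input" layer used further below to read the expected input length), you can inspect it with:

model.summary()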
# Character-level tokenizer over the Latin source alphabet (a-z plus space)
source_tokens = list('abcdefghijklmnopqrstuvwxyz ')
source_tokenizer = Tokenizer(char_level=True, filters='')
source_tokenizer.fit_on_texts(source_tokens)
target_tokens = [
# Independent vowels
'അ', 'ആ', 'ഇ', 'ഈ', 'ഉ', 'ഊ', 'ഋ', 'ൠ', 'ഌ', 'ൡ', 'എ', 'ഏ', 'ഐ', 'ഒ', 'ഓ', 'ഔ',
# Consonants
'ക', 'ഖ', 'ഗ', 'ഘ', 'ങ', 'ച', 'ഛ', 'ജ', 'ഝ', 'ഞ',
'ട', 'ഠ', 'ഡ', 'ഢ', 'ണ', 'ത', 'ഥ', 'ദ', 'ധ', 'ന',
'പ', 'ഫ', 'ബ', 'ഭ', 'മ', 'യ', 'ര', 'ല', 'വ', 'ശ',
'ഷ', 'സ', 'ഹ', 'ള', 'ഴ', 'റ',
# Chillu letters
'ൺ', 'ൻ', 'ർ', 'ൽ', 'ൾ',
# Additional characters
'ം', 'ഃ', '്',
# Vowel modifiers / Signs
'ാ', 'ി', 'ീ', 'ു', 'ൂ', 'ൃ', 'ൄ', 'െ', 'േ', 'ൈ', 'ൊ', 'ോ', 'ൌ', 'ൗ', ' '
]
# Character-level tokenizer over the Malayalam target characters
target_tokenizer = Tokenizer(char_level=True, filters='')
target_tokenizer.fit_on_texts(target_tokens)
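As an optional sanity check (not part of the original recipe), the rebuilt vocabularies can be inspected; the source tokenizer should contain the 27 Latin characters defined above and the target tokenizer one entry per Malayalam character listed, which are assumed to match the vocabularies used at training time:

# Sizes of the character vocabularies the model expects
print(len(source_tokenizer.word_index))  # 27: a-z plus space
print(len(target_tokenizer.word_index))  # one entry per Malayalam character listed above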
# Maximum input sequence length expected by the encoder, read from the saved model
max_seq_length = model.get_layer("encoder_input").input_shape[0][1]
def transliterate_with_split_tokens(input_text, model, source_tokenizer, target_tokenizer, max_seq_length):
    """
    Transliterates the Roman-script tokens in input_text and passes all other
    characters (punctuation, spaces, digits, etc.) through unchanged.
    """
    # Split the text into alternating runs of Latin letters and everything else
    tokens_and_non_tokens = re.findall(r"([a-zA-Z]+)|([^a-zA-Z]+)", input_text)
    transliterated_text = ""
    for token_or_non_token in tokens_and_non_tokens:
        token = token_or_non_token[0]
        non_token = token_or_non_token[1]
        if token:
            # Encode the word, pad it to the encoder's expected length and predict
            input_sequence = source_tokenizer.texts_to_sequences([token])[0]
            input_sequence_padded = pad_sequences([input_sequence], maxlen=max_seq_length, padding='post')
            predicted_sequence = model.predict(input_sequence_padded)
            predicted_indices = np.argmax(predicted_sequence, axis=-1)[0]
            # Map predicted indices back to Malayalam characters, skipping padding (index 0)
            transliterated_word = ''.join([target_tokenizer.index_word[idx] for idx in predicted_indices if idx != 0])
            transliterated_text += transliterated_word
        elif non_token:
            transliterated_text += non_token
    return transliterated_text
input_text = "ente veedu"
transliterated_text = transliterate_with_split_tokens(input_text, model, source_tokenizer, target_tokenizer, max_seq_length)
print(transliterated_text)
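Because only Latin-letter runs are sent to the model and everything else is copied through, mixed input with punctuation can be passed directly; the exact Malayalam output depends on the model (the sentence below is a hypothetical example):

mixed_text = "ente veedu, ente naadu!"
print(transliterate_with_split_tokens(mixed_text, model, source_tokenizer, target_tokenizer, max_seq_length))
# Punctuation and spaces are retained as-is in the output.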
If you use this model, please cite:

@article{baiju2024romanized,
title={Romanized to Native Malayalam Script Transliteration Using an Encoder-Decoder Framework},
author={Baiju, Bajiyo and Pillai, Leena G and Manohar, Kavya and Sherly, Elizabeth},
journal={arXiv preprint arXiv:2412.09957},
year={2024}
}