import re

import gradio as gr
import numpy as np
from huggingface_hub import from_pretrained_keras
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer

# Load the pre-trained character-level transliteration model from the Hugging Face Hub
model = from_pretrained_keras("vrclc/transliteration")
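# Assumption (not documented in the checkpoint itself): the model is a Keras
# encoder-decoder that takes a padded sequence of source character ids and
# returns, per output timestep, a softmax distribution over target characters.
# The greedy decoding below (predict -> argmax -> index_word) relies on this.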
# Source tokenizer: lowercase Latin letters plus space, one token per character
source_tokens = list('abcdefghijklmnopqrstuvwxyz ')
source_tokenizer = Tokenizer(char_level=True, filters='')
source_tokenizer.fit_on_texts(source_tokens)
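# Illustration (the ids depend on fit order, so treat them as placeholders):
#   source_tokenizer.texts_to_sequences(["veedu"])
#   -> [[id_v, id_e, id_e, id_d, id_u]]  # one integer per character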
malayalam_tokens = [
# Independent vowels
'അ', 'ആ', 'ഇ', 'ഈ', 'ഉ', 'ഊ', 'ഋ', 'ൠ', 'ഌ', 'ൡ', 'എ', 'ഏ', 'ഐ', 'ഒ', 'ഓ', 'ഔ',
# Consonants
'ക', 'ഖ', 'ഗ', 'ഘ', 'ങ', 'ച', 'ഛ', 'ജ', 'ഝ', 'ഞ',
'ട', 'ഠ', 'ഡ', 'ഢ', 'ണ', 'ത', 'ഥ', 'ദ', 'ധ', 'ന',
'പ', 'ഫ', 'ബ', 'ഭ', 'മ', 'യ', 'ര', 'ല', 'വ', 'ശ',
'ഷ', 'സ', 'ഹ', 'ള', 'ഴ', 'റ',
# Chillu letters
'ൺ', 'ൻ', 'ർ', 'ൽ', 'ൾ',
# Additional characters
'ം', 'ഃ', '്',
# Vowel modifiers / Signs
'ാ', 'ി', 'ീ', 'ു', 'ൂ', 'ൃ', 'ൄ', 'െ', 'േ', 'ൈ', 'ൊ', 'ോ', 'ൌ', 'ൗ', ' '
]
# Create tokenizer for Malayalam tokens
target_tokenizer = Tokenizer(char_level=True, filters='')
target_tokenizer.fit_on_texts(malayalam_tokens)
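# index_word inverts the mapping for decoding, e.g. (hypothetical ids):
#   target_tokenizer.index_word -> {1: 'അ', 2: 'ആ', ...}
# Keras reserves index 0 for padding, which is why it is skipped when decoding.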
# Infer the maximum input length from the model's encoder input layer
# ('encoder_input' reports input_shape as [(None, max_seq_length)] here,
# so [0][1] picks out the sequence length)
max_seq_length = model.get_layer("encoder_input").input_shape[0][1]
def transliterate_with_split_tokens(input_text, model, source_tokenizer, target_tokenizer, max_seq_length):
    """
    Transliterate the Latin-letter runs in input_text to Malayalam,
    copying all other characters (spaces, digits, punctuation) through unchanged.
    """
    # Split the text into alternating Latin-letter tokens and non-token spans
    tokens_and_non_tokens = re.findall(r"([a-zA-Z]+)|([^a-zA-Z]+)", input_text)
    transliterated_text = ""
    for token, non_token in tokens_and_non_tokens:
        if token:
            # Encode the word as character ids and pad to the encoder's input length
            input_sequence = source_tokenizer.texts_to_sequences([token])[0]
            input_sequence_padded = pad_sequences([input_sequence], maxlen=max_seq_length, padding='post')
            # Greedy decoding: take the most probable target character at each step
            predicted_sequence = model.predict(input_sequence_padded)
            predicted_indices = np.argmax(predicted_sequence, axis=-1)[0]
            # Skip index 0, which the tokenizer reserves for padding
            transliterated_word = ''.join([target_tokenizer.index_word[idx] for idx in predicted_indices if idx != 0])
            transliterated_text += transliterated_word
        elif non_token:
            transliterated_text += non_token
    return transliterated_text
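# Illustrative behaviour of the splitting logic (model output is indicative only):
#   "ente veedu, 2024!" -> the Latin runs "ente" and "veedu" go through the
#   model, while the spaces and ", 2024!" are copied through unchanged.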
def transliterate(input_text):
    return transliterate_with_split_tokens(input_text, model, source_tokenizer, target_tokenizer, max_seq_length)
# Quick smoke test on startup
print(transliterate("ente veed "))
# Create a Gradio interface
iface = gr.Interface(
fn=transliterate,
inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
outputs="text",
title="English to Malayalam Transliteration",
description="Transliterate English text to Malayalam.",
)
iface.launch(share=True)  # share=True only matters for local runs; Spaces ignores it