import typing

import gradio as gr
import hazm

# hazm components are built once at import time; the POS tagger needs a
# pre-trained model file ("pos_tagger.model") next to this script.
normalizer = hazm.Normalizer()
sent_tokenizer = hazm.SentenceTokenizer()
word_tokenizer = hazm.WordTokenizer()
tagger = hazm.POSTagger(model="pos_tagger.model")


def preprocess_text(text: str) -> typing.List[typing.List[str]]:
    """Split/normalize Persian text into sentences of ezafe-annotated words.

    Args:
        text: Raw Persian input text.

    Returns:
        One list of words per sentence, with explicit ezafe markers
        appended by :func:`fix_words`.
    """
    text = normalizer.normalize(text)
    processed_sentences = []
    for sentence in sent_tokenizer.tokenize(text):
        words = word_tokenizer.tokenize(sentence)
        processed_sentences.append(fix_words(words))
    return processed_sentences


def fix_words(words: typing.List[str]) -> typing.List[str]:
    """Append an explicit kasra (ezafe) to words that hazm tags as ezafe-bearing.

    Args:
        words: Tokenized words of a single sentence.

    Returns:
        The words, with "ِ" (and, where needed, "‌ی") appended to
        ezafe-bearing tokens.
    """
    fixed_words = []
    for word, pos in tagger.tag(words):
        # hazm marks ezafe-bearing tokens with a POS tag ending in "e"
        # (e.g. "Ne", "AJe") — presumably; confirm against the model's tagset.
        if pos.endswith("e"):
            if not word.endswith("ِ"):
                # Words ending in "ه" (but not "اه") need a ZWNJ + "ی"
                # inserted before the kasra. endswith() also avoids the
                # IndexError the old word[-2] check hit on 1-char words.
                if word.endswith("ه") and not word.endswith("اه"):
                    word += "‌ی"
                word += "ِ"
        fixed_words.append(word)
    # BUG FIX: previously returned tagger.tag(words) — (word, pos) tuples —
    # instead of the fixed word list the signature and caller expect.
    return fixed_words


iface = gr.Interface(fn=preprocess_text, inputs="text", outputs="text")
iface.launch()