import random

import gradio as gr
from sentencepiece import SentencePieceProcessor
# License Information
# This application uses the following open-source libraries:
#
# 1. Gradio:
# - License: Apache License 2.0
# - Copyright: 2020-2023, Gradio contributors
# - Full License: http://www.apache.org/licenses/LICENSE-2.0
#
# 2. SentencePiece:
# - License: Apache License 2.0
# - Copyright: 2018 Google Inc.
# - Full License: http://www.apache.org/licenses/LICENSE-2.0
# Load the tokenizer
sp = SentencePieceProcessor("models/ver3.0/llm-jp-tokenizer-100k.ver3.0b1.model")
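# Minimal, commented-out sanity check of the encode / id_to_piece round trip;
# the sample sentence below is only an illustrative assumption.
# example_ids = sp.encode("こんにちは、世界")
# example_pieces = [sp.id_to_piece(i) for i in example_ids]
# print(example_ids, example_pieces)  # token IDs and their corresponding pieces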
def get_color_mapping(tokens):
    """Assign a random hex color to each unique token."""
    unique_tokens = list(set(tokens))
    colors = ["#" + ''.join([random.choice('0123456789ABCDEF') for _ in range(6)]) for _ in unique_tokens]
    color_mapping = dict(zip(unique_tokens, colors))
    return color_mapping
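# Sketch of the mapping this produces (the hex values are random; the tokens and
# colors shown here are only an illustrative assumption):
# get_color_mapping(["▁Hello", "▁world"]) -> {"▁Hello": "#3FA2BC", "▁world": "#0D9E41"}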
def process_model(text, model_name):
    """Tokenize `text` and return an HTML report of the tokens and token IDs.

    The HTML tags below are a reconstruction: the original markup was lost, so
    the heading/span/paragraph structure is an assumption, not the original layout.
    """
    token_ids = sp.encode(text)
    tokens = [sp.id_to_piece(i) for i in token_ids]
    num_tokens = len(tokens)
    color_mapping = get_color_mapping(tokens)
    modelname_html = f'<h2>{model_name}</h2>'
    # Highlight each token (and its ID) with the color assigned to that token.
    tokens_colored = [
        f'<span style="background-color: {color_mapping[token]};">{token}</span>'
        for token in tokens
    ]
    token_ids_colored = [
        f'<span style="background-color: {color_mapping[token]};">{token_id}</span>'
        for token, token_id in zip(tokens, token_ids)
    ]
    tokens_html = f'<h3>{model_name} Tokens</h3>' + ' '.join(tokens_colored)
    num_tokens_html = f'<p>Number of Tokens: {num_tokens}</p>'
    token_ids_html = f'<h3>{model_name} Token IDs</h3>' + ' '.join(token_ids_colored)
    return modelname_html + num_tokens_html + tokens_html + token_ids_html
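# Example usage (commented out): the returned string is raw HTML that gr.HTML renders as-is.
# report_html = process_model("Hello world", "SentencePiece Tokenizer")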
def tokenize_input(text):
    """Gradio callback: return the character count and the tokenization report."""
    result = process_model(text, "SentencePiece Tokenizer")
    num_chars = len(text)
    num_chars_html = f'<p>Number of Characters: {num_chars}</p>'  # markup reconstructed, as above
    return num_chars_html, result
with gr.Blocks() as demo:
    gr.Markdown("## SentencePiece Tokenizer App")
    with gr.Row():
        input_text = gr.Textbox(
            lines=2,
            placeholder="Enter text here...",
            label="Enter text to tokenize using the SentencePiece tokenizer.",
        )
        num_chars_output = gr.HTML()
    with gr.Row():
        tokenizer_output = gr.HTML(label="SentencePiece Tokenizer")
    # Update the outputs both while typing and on submit.
    input_text.change(tokenize_input, inputs=[input_text], outputs=[num_chars_output, tokenizer_output])
    input_text.submit(tokenize_input, inputs=[input_text], outputs=[num_chars_output, tokenizer_output])
gr.Markdown("""
### License Information
This application uses the following open-source libraries:
1. **Gradio**:
- License: Apache License 2.0
- Copyright: 2020-2023, Gradio contributors
- Full License: [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0)
- Repository: [Gradio GitHub](https://github.com/gradio-app/gradio/)
2. **SentencePiece**:
- License: Apache License 2.0
- Copyright: 2018 Google Inc.
- Full License: [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0)
- Repository: [SentencePiece GitHub](https://github.com/google/sentencepiece)
""")
# Launch the app
demo.launch()