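"""Gradio app that tokenizes input text or decodes token IDs back to text using a selected Hugging Face tokenizer."""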
import ast

import gradio as gr
from transformers import AutoTokenizer

# Directory containing the locally stored model folders
model_path = "models/"

# Available models
MODELS = ["Meta-Llama-3.1-8B"]
def process_input(input_type, input_value, model_name):
    # Load the tokenizer for the selected model
    tokenizer = AutoTokenizer.from_pretrained(model_path + model_name)

    if input_type == "Text":
        # Tokenize the text and compute the corresponding token IDs
        tokens = tokenizer.tokenize(input_value)
        token_ids = tokenizer.encode(input_value)
        return f"Total tokens: {len(tokens)}", tokens, token_ids
    elif input_type == "Token IDs":
        try:
            # Parse the input as a Python list literal, e.g. "[1, 2, 3]"
            token_ids = ast.literal_eval(input_value)
            # Convert the token IDs back to text
            text = tokenizer.decode(token_ids)
            return f"Total tokens: {len(token_ids)}", text, input_value
        except (ValueError, SyntaxError):
            return "Error", "Invalid input. Please enter a list of integers, e.g. [1, 2, 3].", ""
# Create Gradio interface
iface = gr.Interface(
    fn=process_input,
    inputs=[
        gr.Radio(["Text", "Token IDs"], label="Input Type", value="Text"),
        gr.Textbox(lines=5, label="Input"),
        gr.Dropdown(choices=MODELS, label="Select Model"),
    ],
    outputs=[
        gr.Textbox(label="Token Count"),
        gr.Textbox(label="Tokens", lines=10),
        gr.Textbox(label="Token IDs", lines=5),
    ],
    title="LLM Tokenization and Token ID Converter",
    description="Enter text or token IDs and select a model to see the conversion results.",
)
if __name__ == "__main__":
    iface.launch()