import gradio as gr
from transformers import AutoModelForCausalLM, GPT2Tokenizer

model_path = "franzemil/bolivianlm"
tokenizer_name = "datificate/gpt2-small-spanish"

# Load the model and the tokenizer once at startup instead of on every request
model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = GPT2Tokenizer.from_pretrained(tokenizer_name)


def generate_text(sentence, max_length=100):
    # Encode the input sentence into token ids
    ids = tokenizer.encode(sentence, return_tensors="pt")
    # Sample a continuation from the model (top-k and nucleus sampling)
    outputs = model.generate(
        ids,
        do_sample=True,
        max_length=max_length,
        pad_token_id=model.config.eos_token_id,
        top_k=50,
        top_p=0.95,
    )
    # Decode the generated ids and return the string
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


demo = gr.Interface(fn=generate_text, inputs="text", outputs="text")
demo.launch()