from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr

# Load the tokenizer and model weights from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("ahmadmac/Pretrained-GPT2")
model = AutoModelForCausalLM.from_pretrained("ahmadmac/Pretrained-GPT2")

def generate_text(prompt):
    # Tokenize the prompt and generate up to 50 tokens total (prompt tokens included in max_length)
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=50)
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text

# Wrap the generation function in a simple text-in, text-out Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="GPT-2 Text Generator",
    description="Enter a prompt to generate text using GPT-2"
)

iface.launch()
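For a quick sanity check without launching the web UI, the generation function can be called directly once the model has loaded; a minimal usage sketch (the prompt string is just an illustration):

# Hypothetical quick test of the generation function defined above
print(generate_text("Once upon a time"))

When this script is deployed as a Gradio Space, its dependencies (transformers, torch, gradio) also need to be listed in the Space's requirements.txt so the runtime can install them.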