# app.py
from transformers import pipeline
import gradio as gr
# Initialize the text generation pipeline
generator = pipeline("text-generation", model="distilgpt2")
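# Note: the first run downloads the distilgpt2 weights from the Hugging Face Hub
# and caches them locally, so startup can take a moment.
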
def generate_response(prompt):
    """Generate text based on the user's prompt."""
    response = generator(prompt, max_length=300, num_return_sequences=1)
    return response[0]["generated_text"]
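
# Shape of the pipeline output (illustrative sketch, assuming the standard
# transformers text-generation pipeline): a list with one dict per returned
# sequence, where "generated_text" is the prompt followed by the continuation.
#
#   >>> generator("Hello, world", max_length=20, num_return_sequences=1)
#   [{'generated_text': 'Hello, world ...'}]
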
# Create a Gradio interface
interface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Simple LLM",
    description="Enter a prompt to get a response generated by DistilGPT-2!",
)
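
# The "text" shorthand above maps to gr.Textbox components; an equivalent,
# more explicit form would be inputs=gr.Textbox(), outputs=gr.Textbox().
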
# Launch the app
if __name__ == "__main__":
    interface.launch()
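
# Quick usage sketch (assuming transformers, torch, and gradio are installed):
#   python app.py
# Gradio then serves the UI locally, at http://127.0.0.1:7860 by default.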