from transformers import pipeline
import gradio as gr

# Load a lightweight instruction-tuned Llama 3.2 model for text generation
pipe = pipeline("text-generation", model="unsloth/Llama-3.2-1B-Instruct")

def generate_text(prompt):
    # Generate a single completion, capping prompt + output at 512 tokens
    response = pipe(prompt, max_length=512, num_return_sequences=1)
    return response[0]['generated_text']

# Simple Gradio UI: a text box in, generated text out
iface = gr.Interface(fn=generate_text, inputs="text", outputs="text",
                     title="Text Generation with LLaMA-3.2",
                     description="Enter a prompt to generate text using the LLaMA-3.2 model.")
iface.launch(share=True)
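
# Optional: the running app can also be queried programmatically with the
# gradio_client package. A minimal sketch, assuming the app is reachable at the
# local URL printed by launch() (or the public share URL) and that gr.Interface
# exposes its function under the default "/predict" endpoint name.
from gradio_client import Client

client = Client("http://127.0.0.1:7860")  # assumption: default local URL
result = client.predict("Write a short poem about the ocean.", api_name="/predict")
print(result)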