import gradio as gr
import torch
from transformers import pipeline

# Use the first GPU if available, otherwise fall back to the CPU.
device = 0 if torch.cuda.is_available() else -1

# Load the model once at startup rather than on every request.
pipe = pipeline(
    "text-generation",
    model="frameai/ChatFrame-Instruct-Persian-Small",
    device=device,
)

def generate_response(user_input, history):
    # The pipeline accepts chat-style input as a list of role/content dicts.
    # The chat history provided by Gradio is not forwarded to the model here.
    messages = [
        {"role": "user", "content": user_input},
    ]
    response = pipe(messages, max_length=8000)
    # generated_text holds the full conversation; the last entry is the
    # assistant's reply.
    return response[0]["generated_text"][-1]["content"]

iface = gr.ChatInterface(
    fn=generate_response,
    title="Text Generation Chatbot",
    description="Enter your text and get a generated response from the model.",
)

iface.launch()