import torch
import gradio as gr
import spaces
from transformers import pipeline


@spaces.GPU
def greet(name):
    # Build a text-generation pipeline on the GPU allocated by ZeroGPU.
    pipe = pipeline(
        "text-generation",
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )
    messages = [
        {
            "role": "system",
            "content": "You are a friendly chatbot who always responds in a professional way.",
        },
        {"role": "user", "content": name},
    ]
    # Render the chat messages with the model's own chat template.
    prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    outputs = pipe(
        prompt,
        max_new_tokens=2048,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        return_full_text=False,  # return only the generated reply, not the echoed prompt
    )
    return outputs[0]["generated_text"]


demo = gr.Interface(
    fn=greet,
    inputs=["text"],
    outputs=["text"],
)

demo.launch(share=True)
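# Optional variant (a sketch, not part of the original app): constructing the
# pipeline inside greet() rebuilds it on every request, so each call pays the
# full model-load cost. Caching it in a module-level global loads the weights
# only once per process. The global `_pipe` and the helper name `_get_pipe`
# are assumed names introduced here for illustration.
#
# _pipe = None
#
# def _get_pipe():
#     global _pipe
#     if _pipe is None:
#         _pipe = pipeline(
#             "text-generation",
#             model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
#             torch_dtype=torch.bfloat16,
#             device_map="auto",
#         )
#     return _pipe
#
# Inside greet(), `pipe = _get_pipe()` would then replace the per-call
# pipeline(...) construction.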