import gradio as gr
from huggingface_hub import InferenceClient

# Client for the hosted model on the Hugging Face Inference API.
client = InferenceClient("jhangmez/CHATPRG-v0.2.1-Meta-Llama-3.1-8B-bnb-4bit-q4_k_m")


def format_message(role, content):
    """Wrap a single turn in ChatML-style special tokens."""
    return f"<|im_start|>{role}\n{content}<|im_end|>\n"


def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Build the prompt: system message first, then prior turns, then the new user message.
    formatted_messages = [format_message("system", system_message)]
    for human, assistant in history:
        if human:
            formatted_messages.append(format_message("user", human))
        if assistant:
            formatted_messages.append(format_message("assistant", assistant))
    formatted_messages.append(format_message("user", message))
    # Open the assistant turn so the model continues from here.
    formatted_messages.append("<|im_start|>assistant\n")

    full_prompt = "".join(formatted_messages)

    # Stream tokens back to the UI as they arrive, yielding the growing response.
    response = ""
    for chunk in client.text_generation(
        full_prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        response += chunk
        yield response.strip()


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
    title="Meta Llama 3.1 8B Chatbot",
    description="A chatbot powered by the Meta Llama 3.1 8B model.",
)

if __name__ == "__main__":
    demo.launch()
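# For reference, format_message renders a short conversation into a prompt like
# the sketch below (illustrative only; the exact template and whether it ends in
# a trailing newline depend on how this particular fine-tune was trained):
#
#   <|im_start|>system
#   You are a friendly Chatbot.<|im_end|>
#   <|im_start|>user
#   Hello!<|im_end|>
#   <|im_start|>assistant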