import gradio as gr
from huggingface_hub import InferenceClient
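# Requires: pip install gradio huggingface_hub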

# Inference API client for the fine-tuned model; it picks up a Hugging Face
# token from the environment or local login session if one is available.
client = InferenceClient("jhangmez/CHATPRG-v0.2.1-Meta-Llama-3.1-8B-bnb-4bit-q4_k_m")

def format_message(role, content):
    # Wrap one chat turn in the model's ChatML-style template (note the
    # space separators used here in place of the usual newlines).
    return f"<|im_start|>{role} {content}<|im_end|> "

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # The prompt always starts with the system message.
    formatted_messages = [format_message("system", system_message)]

    # Replay the conversation so far; history is a list of
    # (user, assistant) pairs, either of which may be empty.
    for human, assistant in history:
        if human:
            formatted_messages.append(format_message("user", human))
        if assistant:
            formatted_messages.append(format_message("assistant", assistant))

    formatted_messages.append(format_message("user", message))
    formatted_messages.append("<|im_start|>assistant ")

    full_prompt = "".join(formatted_messages)
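    # e.g. for message="Hi" with an empty history and the default system
    # message, full_prompt is (trailing space included):
    #   <|im_start|>system You are a friendly Chatbot.<|im_end|> <|im_start|>user Hi<|im_end|> <|im_start|>assistant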

    response = ""
    for chunk in client.text_generation(
        full_prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        response += chunk
        yield response.strip()

# Chat UI; the textbox and sliders below are passed to respond() as
# additional inputs after (message, history).
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
    title="Meta Llama 3.1 8B Chatbot",
    description="A chatbot powered by the Meta Llama 3.1 8B model."
)
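
# A minimal smoke test (a sketch, not part of the original app): drains the
# respond() stream for a single message, assuming the Inference API endpoint
# for this model is reachable. Call smoke_test() manually to try it.
def smoke_test():
    final = ""
    for final in respond(
        "Hello!",                       # user message
        [],                             # empty history
        "You are a friendly Chatbot.",  # system message
        64,                             # max_tokens
        0.7,                            # temperature
        0.95,                           # top_p
    ):
        pass  # drain the stream; `final` ends up holding the full reply
    print(final)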

if __name__ == "__main__":
    demo.launch()