from huggingface_hub import InferenceClient
import gradio as gr

inference_client = InferenceClient("google/gemma-7b-it")
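# Note: depending on your Hugging Face account setup, the Inference API may
# require an access token (passed via the HF_TOKEN environment variable or the
# token= argument of InferenceClient).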

# Format the prompt according to the chat template on the official model page:
# https://huggingface.co/google/gemma-7b-it
def format_prompt(input_text, history):
    prompt = ""
    if history:
        for previous_prompt, response in history:
            # Replay each previous exchange as a user turn followed by a model turn
            prompt += f"<start_of_turn>user\n{previous_prompt}<end_of_turn>\n"
            prompt += f"<start_of_turn>model\n{response}<end_of_turn>\n"
    # Add the current user message and open the model turn for generation
    prompt += f"<start_of_turn>user\n{input_text}<end_of_turn>\n<start_of_turn>model\n"
    return prompt
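# Illustrative example (not executed): with history=[("Hi", "Hello!")] and
# input_text="How are you?", format_prompt returns roughly:
#   <start_of_turn>user
#   Hi<end_of_turn>
#   <start_of_turn>model
#   Hello!<end_of_turn>
#   <start_of_turn>user
#   How are you?<end_of_turn>
#   <start_of_turn>model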

def generate(prompt, history, temperature=0.95, max_new_tokens=512, top_p=0.9, repetition_penalty=1.0):
    if not history:
        history = []

    # Gradio sliders may pass values as floats; normalize types for the API call
    temperature = float(temperature)
    top_p = float(top_p)
    max_new_tokens = int(max_new_tokens)

    kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
    )

    formatted_prompt = format_prompt(prompt, history)

    # Stream the generation; yielding the accumulated text lets Gradio update
    # the chatbot message token by token
    response = inference_client.text_generation(
        formatted_prompt, **kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for chunk in response:
        output += chunk.token.text
        yield output


additional_inputs=[
    gr.Slider(
        label="Temperature",
        value=0.85,
        minimum=0.1,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="A higher value (> 1) will generate randomness and variability in the model response",
    ),
    gr.Slider(
        label="Max new tokens",
        value=512,
        minimum=128,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens generated in the model response",
    ),
    gr.Slider(
        label="Top-p (random sampling)",
        value=0.80,
        minimum=0.1,
        maximum=1,
        step=0.05,
        interactive=True,
        info="A smaller value generates the highest probability tokens, a higher value (~ 1) allows low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.0,
        minimum=0.5,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalizes repeated tokens in model response",
    )
]

chatbot = gr.Chatbot(height=500)
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.HTML("<center><h1>Google Gemma 7B IT</h1></center>")
    gr.ChatInterface(
        generate,
        chatbot=chatbot,  
        retry_btn=None,
        undo_btn=None,
        clear_btn="Clear",
        description="This AI agent uses the Hugging Face InferenceClient with the google/gemma-7b-it model.",
        additional_inputs=additional_inputs,
        examples=[["Explain artificial intelligence in a few lines."]]
    )
demo.queue().launch()
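# To try this locally (assuming gradio and huggingface_hub are installed):
#   pip install gradio huggingface_hub
#   python app.py
# then open the local URL Gradio prints (http://127.0.0.1:7860 by default).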