mgoin committed on
Commit 5e818be
1 Parent(s): 33e91ad
Files changed (3)
  1. app.py +119 -0
  2. requirements.txt +2 -0
  3. style.css +17 -0
app.py ADDED
@@ -0,0 +1,119 @@
+ import os
+
+ import gradio as gr
+ import spaces
+ import torch
+ from transformers import AutoTokenizer
+ from vllm import LLM, SamplingParams
+
+ MAX_MAX_NEW_TOKENS = 2048
+ DEFAULT_MAX_NEW_TOKENS = 1024
+ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
+
+ DESCRIPTION = """\
+ # NM vLLM Hermes Mistral Chat
+ """
+
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+
+
+ if torch.cuda.is_available():
+     model_id = "nm-testing/OpenHermes-2.5-Mistral-7B-pruned50"
+     model = LLM(model_id, max_model_len=MAX_INPUT_TOKEN_LENGTH)
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     tokenizer.use_default_system_prompt = False
+
+
+ @spaces.GPU
+ def generate(
+     message: str,
+     chat_history: list[tuple[str, str]],
+     system_prompt: str,
+     max_new_tokens: int = 1024,
+     temperature: float = 0.6,
+     top_p: float = 0.9,
+     top_k: int = 50,
+     repetition_penalty: float = 1.2,
+ ) -> str:
+     conversation = []
+     if system_prompt:
+         conversation.append({"role": "system", "content": system_prompt})
+     for user, assistant in chat_history:
+         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+     conversation.append({"role": "user", "content": message})
+
+     formatted_conversation = tokenizer.apply_chat_template(conversation, tokenize=False)
+
+     sampling_params = SamplingParams(
+         max_tokens=max_new_tokens,
+         top_p=top_p,
+         top_k=top_k,
+         temperature=temperature,
+         repetition_penalty=repetition_penalty,
+     )
+
+     outputs = model.generate(formatted_conversation, sampling_params)
+
+     for output in outputs:
+         generated_text = output.outputs[0].text
+     return generated_text
+
+
+ chat_interface = gr.ChatInterface(
+     fn=generate,
+     additional_inputs=[
+         gr.Textbox(label="System prompt", lines=6),
+         gr.Slider(
+             label="Max new tokens",
+             minimum=1,
+             maximum=MAX_MAX_NEW_TOKENS,
+             step=1,
+             value=DEFAULT_MAX_NEW_TOKENS,
+         ),
+         gr.Slider(
+             label="Temperature",
+             minimum=0.1,
+             maximum=4.0,
+             step=0.1,
+             value=0.6,
+         ),
+         gr.Slider(
+             label="Top-p (nucleus sampling)",
+             minimum=0.05,
+             maximum=1.0,
+             step=0.05,
+             value=0.9,
+         ),
+         gr.Slider(
+             label="Top-k",
+             minimum=1,
+             maximum=1000,
+             step=1,
+             value=50,
+         ),
+         gr.Slider(
+             label="Repetition penalty",
+             minimum=1.0,
+             maximum=2.0,
+             step=0.05,
+             value=1.2,
+         ),
+     ],
+     stop_btn=None,
+     examples=[
+         ["Hello there! How are you doing?"],
+         ["Can you explain briefly to me what is the Python programming language?"],
+         ["Explain the plot of Cinderella in a sentence."],
+         ["How many hours does it take a man to eat a Helicopter?"],
+         ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
+     ],
+ )
+
+ with gr.Blocks(css="style.css") as demo:
+     gr.Markdown(DESCRIPTION)
+     gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
+     chat_interface.render()
+
+ if __name__ == "__main__":
+     demo.queue(max_size=20).launch()
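
For readers tracing the generate() flow above, a minimal standalone sketch (not part of the commit) of the prompt string that tokenizer.apply_chat_template builds before it is handed to vLLM. The ChatML-style output shown in the comment is an assumption based on the upstream OpenHermes-2.5 tokenizer config; the actual template is whatever the checkpoint ships.

    from transformers import AutoTokenizer

    # Model id taken from app.py above; requires access to the checkpoint.
    tokenizer = AutoTokenizer.from_pretrained("nm-testing/OpenHermes-2.5-Mistral-7B-pruned50")
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello there! How are you doing?"},
    ]
    # Same call app.py makes: render the chat as a single prompt string.
    prompt = tokenizer.apply_chat_template(conversation, tokenize=False)
    # Assumed ChatML-style result, e.g.:
    # <|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n...
    print(prompt)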
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ vllm
+ gradio
style.css ADDED
@@ -0,0 +1,17 @@
+ h1 {
+   text-align: center;
+   display: block;
+ }
+
+ #duplicate-button {
+   margin: auto;
+   color: white;
+   background: #1565c0;
+   border-radius: 100vh;
+ }
+
+ .contain {
+   max-width: 900px;
+   margin: auto;
+   padding-top: 1.5rem;
+ }