rishiraj committed
Commit a4c5bac
Parent: 82822b0

Update app.py

Files changed (1): app.py (+26 -22)
app.py CHANGED
@@ -16,7 +16,7 @@ logging.basicConfig(
 
 base_model = "HuggingFaceH4/zephyr-7b-beta"
 adapter_model = None
-tokenizer,model,device = load_tokenizer_and_model(base_model,adapter_model)
+# tokenizer,model,device = load_tokenizer_and_model(base_model,adapter_model)
 
 total_count = 0
 def predict(text,
@@ -25,7 +25,7 @@ def predict(text,
             top_p,
             temperature,
             max_length_tokens,
-            max_context_length_tokens,):
+            repetition_penalty):
     if text=="":
         yield chatbot,history,"Empty context."
         return
@@ -84,7 +84,7 @@ def retry(
           top_p,
           temperature,
           max_length_tokens,
-          max_context_length_tokens,
+          repetition_penalty
 ):
     logging.info("Retry...")
     if len(history) == 0:
@@ -132,36 +132,40 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
         with gr.Tab(label="Parameter Setting"):
             gr.Markdown("# Parameters")
             top_p = gr.Slider(
-                minimum=-0,
-                maximum=1.0,
+                label="Top-p (nucleus sampling)",
                 value=0.95,
+                minimum=0.0,
+                maximum=1,
                 step=0.05,
                 interactive=True,
-                label="Top-p",
+                info="Higher values sample more low-probability tokens",
             )
             temperature = gr.Slider(
-                minimum=0.1,
-                maximum=2.0,
+                label="Temperature",
                 value=1,
-                step=0.1,
+                minimum=0.0,
+                maximum=1.0,
+                step=0.05,
                 interactive=True,
-                label="Temperature",
+                info="Higher values produce more diverse outputs",
             )
             max_length_tokens = gr.Slider(
+                label="Max new tokens",
+                value=256,
                 minimum=0,
-                maximum=512,
-                value=512,
-                step=8,
+                maximum=1048,
+                step=64,
                 interactive=True,
-                label="Max Generation Tokens",
+                info="The maximum numbers of new tokens",
             )
-            max_context_length_tokens = gr.Slider(
-                minimum=0,
-                maximum=4096,
-                value=2048,
-                step=128,
+            repetition_penalty = gr.Slider(
+                label="Repetition penalty",
+                value=1.2,
+                minimum=1.0,
+                maximum=2.0,
+                step=0.05,
                 interactive=True,
-                label="Max History Tokens",
+                info="Penalize repeated tokens",
             )
             gr.Markdown(description)
 
@@ -174,7 +178,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
             top_p,
             temperature,
             max_length_tokens,
-            max_context_length_tokens,
+            repetition_penalty,
         ],
         outputs=[chatbot, history, status_display],
         show_progress=True,
@@ -188,7 +192,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
             top_p,
            temperature,
            max_length_tokens,
-           max_context_length_tokens,
+           repetition_penalty,
         ],
         outputs=[chatbot, history, status_display],
         show_progress=True,
  show_progress=True,
 
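The commit also comments out the call to load_tokenizer_and_model, whose definition is not shown in this diff. A conventional helper with that signature might look like the hedged sketch below (assumptions: the model is loaded with transformers, and an adapter, when given, is attached via PEFT; the real implementation may differ):

    # Hypothetical reconstruction; the actual helper lives elsewhere in the Space.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    def load_tokenizer_and_model(base_model, adapter_model=None):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        tokenizer = AutoTokenizer.from_pretrained(base_model)
        model = AutoModelForCausalLM.from_pretrained(
            base_model,
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
        ).to(device)
        if adapter_model is not None:
            # Assumption: adapters are applied with PEFT when provided.
            from peft import PeftModel
            model = PeftModel.from_pretrained(model, adapter_model)
        return tokenizer, model, device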