increase chatbot size
app.py CHANGED
@@ -12,7 +12,7 @@ import fireworks_gradio
 
 
 with gr.Blocks(fill_height=True) as demo:
-    with gr.Tab("
+    with gr.Tab("Sambanova"):
         with gr.Row():
             llama_model = gr.Dropdown(
                 choices=[
@@ -33,7 +33,8 @@ with gr.Blocks(fill_height=True) as demo:
             name=llama_model.value,
             src=sambanova_gradio.registry,
             multimodal=True,
-            fill_height=True
+            fill_height=True,
+            chatbot=gr.Chatbot(height=600)
         )
 
         def update_llama_model(new_model):
@@ -41,7 +42,8 @@ with gr.Blocks(fill_height=True) as demo:
                 name=new_model,
                 src=sambanova_gradio.registry,
                 multimodal=True,
-                fill_height=True
+                fill_height=True,
+                chatbot=gr.Chatbot(height=600)
             )
 
         llama_model.change(
@@ -68,14 +70,16 @@ with gr.Blocks(fill_height=True) as demo:
         gemini_interface = gr.load(
             name=gemini_model.value,
             src=gemini_gradio.registry,
-            fill_height=True
+            fill_height=True,
+            chatbot=gr.Chatbot(height=600)
         )
 
         def update_gemini_model(new_model):
             return gr.load(
                 name=new_model,
                 src=gemini_gradio.registry,
-                fill_height=True
+                fill_height=True,
+                chatbot=gr.Chatbot(height=600)
             )
 
         gemini_model.change(
@@ -114,7 +118,8 @@ with gr.Blocks(fill_height=True) as demo:
             name=model_choice.value,
             src=openai_gradio.registry,
             accept_token=True,
-            fill_height=True
+            fill_height=True,
+            chatbot=gr.Chatbot(height=600)
         )
 
         def update_model(new_model):
@@ -122,7 +127,8 @@ with gr.Blocks(fill_height=True) as demo:
                 name=new_model,
                 src=openai_gradio.registry,
                 accept_token=True,
-                fill_height=True
+                fill_height=True,
+                chatbot=gr.Chatbot(height=600)
             )
 
         model_choice.change(
@@ -149,7 +155,8 @@ with gr.Blocks(fill_height=True) as demo:
             name=claude_model.value,
             src=anthropic_gradio.registry,
             accept_token=True,
-            fill_height=True
+            fill_height=True,
+            chatbot=gr.Chatbot(height=600)
         )
 
         def update_claude_model(new_model):
@@ -157,7 +164,8 @@ with gr.Blocks(fill_height=True) as demo:
                 name=new_model,
                 src=anthropic_gradio.registry,
                 accept_token=True,
-                fill_height=True
+                fill_height=True,
+                chatbot=gr.Chatbot(height=600)
            )
 
         claude_model.change(
@@ -170,7 +178,8 @@ with gr.Blocks(fill_height=True) as demo:
             name='grok-beta',
             src=xai_gradio.registry,
             accept_token=True,
-            fill_height=True
+            fill_height=True,
+            chatbot=gr.Chatbot(height=600)
         )
     with gr.Tab("Qwen"):
         with gr.Row():
@@ -187,14 +196,16 @@ with gr.Blocks(fill_height=True) as demo:
         qwen_interface = gr.load(
             name=qwen_model.value,
             src=hyperbolic_gradio.registry,
-            fill_height=True
+            fill_height=True,
+            chatbot=gr.Chatbot(height=600)
         )
 
         def update_qwen_model(new_model):
             return gr.load(
                 name=new_model,
                 src=hyperbolic_gradio.registry,
-                fill_height=True
+                fill_height=True,
+                chatbot=gr.Chatbot(height=600)
             )
 
         qwen_model.change(
@@ -234,7 +245,8 @@ with gr.Blocks(fill_height=True) as demo:
             name=perplexity_model.value,
             src=perplexity_gradio.registry,
             accept_token=True,
-            fill_height=True
+            fill_height=True,
+            chatbot=gr.Chatbot(height=600)
         )
 
         def update_perplexity_model(new_model):
@@ -242,7 +254,8 @@ with gr.Blocks(fill_height=True) as demo:
                 name=new_model,
                 src=perplexity_gradio.registry,
                 accept_token=True,
-                fill_height=True
+                fill_height=True,
+                chatbot=gr.Chatbot(height=600)
             )
 
         perplexity_model.change(
@@ -263,7 +276,8 @@ with gr.Blocks(fill_height=True) as demo:
         gr.load(
             name='deepseek-ai/DeepSeek-V2.5',
             src=hyperbolic_gradio.registry,
-            fill_height=True
+            fill_height=True,
+            chatbot=gr.Chatbot(height=600)
         )
         gr.Markdown("""
         <div>
@@ -298,14 +312,16 @@ with gr.Blocks(fill_height=True) as demo:
         mistral_interface = gr.load(
             name=mistral_model.value,
             src=mistral_gradio.registry,
-            fill_height=True
+            fill_height=True,
+            chatbot=gr.Chatbot(height=600)
         )
 
        def update_mistral_model(new_model):
            return gr.load(
                 name=new_model,
                 src=mistral_gradio.registry,
-                fill_height=True
+                fill_height=True,
+                chatbot=gr.Chatbot(height=600)
             )
 
         mistral_model.change(
@@ -338,14 +354,16 @@ with gr.Blocks(fill_height=True) as demo:
         fireworks_interface = gr.load(
             name=fireworks_model.value,
             src=fireworks_gradio.registry,
-            fill_height=True
+            fill_height=True,
+            chatbot=gr.Chatbot(height=600)
         )
 
         def update_fireworks_model(new_model):
             return gr.load(
                 name=new_model,
                 src=fireworks_gradio.registry,
-                fill_height=True
+                fill_height=True,
+                chatbot=gr.Chatbot(height=600)
             )
 
         fireworks_model.change(
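
In short, every gr.load() call in app.py gains chatbot=gr.Chatbot(height=600) alongside fill_height=True, so each provider tab renders a taller chat area. Below is a minimal, self-contained sketch of that pattern using the DeepSeek block from the diff; the tab title and the demo.launch() call are assumptions for illustration, and the hyperbolic_gradio package plus its API key are expected to be available.

import gradio as gr
import hyperbolic_gradio  # registry package assumed to be installed

with gr.Blocks(fill_height=True) as demo:
    with gr.Tab("DeepSeek"):  # tab title assumed for this sketch
        # Passing a pre-built gr.Chatbot raises the chat area to 600 px;
        # fill_height=True still lets the loaded interface stretch with the page.
        gr.load(
            name='deepseek-ai/DeepSeek-V2.5',
            src=hyperbolic_gradio.registry,
            fill_height=True,
            chatbot=gr.Chatbot(height=600),
        )

demo.launch()

The same two keyword arguments are applied unchanged to the Sambanova, Gemini, OpenAI, Claude, Grok, Qwen, Perplexity, Mistral, and Fireworks loaders, both in the initial gr.load() calls and in the update_* callbacks triggered by each model dropdown.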
|