Commit 5a2865e by akhaliq (HF staff)
Parent(s): 642c55c

dropdown fix

Files changed (1)
  1. app.py +34 -14
app.py CHANGED
@@ -41,12 +41,13 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_llama_model(new_model):
-            return gr.load(
+            llama_interface.load(
                 name=new_model,
                 src=sambanova_gradio.registry,
                 multimodal=True,
                 fill_height=True
             )
+            return llama_interface
 
         llama_model.change(
             fn=update_llama_model,
@@ -76,11 +77,12 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_gemini_model(new_model):
-            return gr.load(
+            gemini_interface.load(
                 name=new_model,
                 src=gemini_gradio.registry,
                 fill_height=True
             )
+            return gemini_interface
 
         gemini_model.change(
             fn=update_gemini_model,
@@ -122,12 +124,13 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_model(new_model):
-            return gr.load(
+            chatgpt_interface.load(
                 name=new_model,
                 src=openai_gradio.registry,
                 accept_token=True,
                 fill_height=True
             )
+            return chatgpt_interface
 
         model_choice.change(
             fn=update_model,
@@ -157,12 +160,13 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_claude_model(new_model):
-            return gr.load(
+            claude_interface.load(
                 name=new_model,
                 src=anthropic_gradio.registry,
                 accept_token=True,
                 fill_height=True
             )
+            return claude_interface
 
         claude_model.change(
             fn=update_claude_model,
@@ -188,11 +192,12 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_grok_model(new_model):
-            return gr.load(
+            grok_interface.load(
                 name=new_model,
                 src=xai_gradio.registry,
                 fill_height=True
             )
+            return grok_interface
 
         grok_model.change(
             fn=update_grok_model,
@@ -225,11 +230,12 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_groq_model(new_model):
-            return gr.load(
+            groq_interface.load(
                 name=new_model,
                 src=groq_gradio.registry,
                 fill_height=True
             )
+            return groq_interface
 
         groq_model.change(
             fn=update_groq_model,
@@ -271,11 +277,12 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_hyperbolic_model(new_model):
-            return gr.load(
+            hyperbolic_interface.load(
                 name=new_model,
                 src=hyperbolic_gradio.registry,
                 fill_height=True
             )
+            return hyperbolic_interface
 
         hyperbolic_model.change(
             fn=update_hyperbolic_model,
@@ -309,11 +316,12 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_qwen_model(new_model):
-            return gr.load(
+            qwen_interface.load(
                 name=new_model,
                 src=hyperbolic_gradio.registry,
                 fill_height=True
             )
+            return qwen_interface
 
         qwen_model.change(
             fn=update_qwen_model,
@@ -356,12 +364,13 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_perplexity_model(new_model):
-            return gr.load(
+            perplexity_interface.load(
                 name=new_model,
                 src=perplexity_gradio.registry,
                 accept_token=True,
                 fill_height=True
             )
+            return perplexity_interface
 
         perplexity_model.change(
             fn=update_perplexity_model,
@@ -420,11 +429,12 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_mistral_model(new_model):
-            return gr.load(
+            mistral_interface.load(
                 name=new_model,
                 src=mistral_gradio.registry,
                 fill_height=True
             )
+            return mistral_interface
 
         mistral_model.change(
             fn=update_mistral_model,
@@ -460,11 +470,12 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_fireworks_model(new_model):
-            return gr.load(
+            fireworks_interface.load(
                 name=new_model,
                 src=fireworks_gradio.registry,
                 fill_height=True
             )
+            return fireworks_interface
 
         fireworks_model.change(
             fn=update_fireworks_model,
@@ -496,12 +507,19 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_cerebras_model(new_model):
-            return gr.load(
+            cerebras_interface.load(
                 name=new_model,
                 src=cerebras_gradio.registry,
                 accept_token=True, # Added token acceptance
                 fill_height=True
             )
+            return cerebras_interface
+
+        cerebras_model.change(
+            fn=update_cerebras_model,
+            inputs=[cerebras_model],
+            outputs=[cerebras_interface]
+        )
     with gr.Tab("Together"):
         with gr.Row():
             together_model = gr.Dropdown(
@@ -560,12 +578,13 @@ with gr.Blocks(fill_height=True) as demo:
         )
 
         def update_together_model(new_model):
-            return gr.load(
+            together_interface.load(
                 name=new_model,
                 src=together_gradio.registry,
                 multimodal=True,
                 fill_height=True
             )
+            return together_interface
 
         together_model.change(
             fn=update_together_model,
@@ -636,12 +655,13 @@ with gr.Blocks(fill_height=True) as demo:
        )
 
         def update_nvidia_model(new_model):
-            return gr.load(
+            nvidia_interface.load(
                 name=new_model,
                 src=nvidia_gradio.registry,
                 accept_token=True,
                 fill_height=True
             )
+            return nvidia_interface
 
         nvidia_model.change(
             fn=update_nvidia_model,
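
For orientation, the sketch below shows the full wiring that each hunk above edits, using the Llama/SambaNova tab as the example. The body of update_llama_model is copied from the post-commit side of the diff; the tab name, the dropdown choices, the default model name, the inputs/outputs lists (mirrored from the cerebras_model.change(...) wiring added above), and the demo.launch() call are assumptions that this commit does not show, and running it requires the sambanova_gradio package and a valid SambaNova API key.

import gradio as gr
import sambanova_gradio  # assumed dependency; needs a SambaNova API key at runtime

with gr.Blocks(fill_height=True) as demo:
    with gr.Tab("Meta Llama"):  # tab name assumed; not visible in this diff
        with gr.Row():
            # Dropdown of selectable models; these choices are illustrative placeholders.
            llama_model = gr.Dropdown(
                choices=["Meta-Llama-3.2-90B-Vision-Instruct"],
                value="Meta-Llama-3.2-90B-Vision-Instruct",
                label="Select Llama Model"
            )

        # Initial chat interface loaded from the SambaNova registry.
        llama_interface = gr.load(
            name="Meta-Llama-3.2-90B-Vision-Instruct",
            src=sambanova_gradio.registry,
            multimodal=True,
            fill_height=True
        )

        # Post-commit handler: load the newly selected model against the existing
        # interface and return it, instead of returning a fresh gr.load(...) component.
        def update_llama_model(new_model):
            llama_interface.load(
                name=new_model,
                src=sambanova_gradio.registry,
                multimodal=True,
                fill_height=True
            )
            return llama_interface

        # Re-run the handler whenever the dropdown selection changes; inputs/outputs
        # mirror the cerebras wiring introduced in this commit.
        llama_model.change(
            fn=update_llama_model,
            inputs=[llama_model],
            outputs=[llama_interface]
        )

demo.launch()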