akhaliq committed
Commit fa93ad7
1 Parent(s): 7c3781c

move HF tab

Files changed (1)
  1. app.py +61 -61
app.py CHANGED
@@ -199,6 +199,67 @@ with gr.Blocks(fill_height=True) as demo:
             inputs=[grok_model],
             outputs=[grok_interface]
         )
+    with gr.Tab("Hugging Face"):
+        with gr.Row():
+            hf_model = gr.Dropdown(
+                choices=[
+                    # Latest Large Models
+                    'Qwen/Qwen2.5-Coder-32B-Instruct',
+                    'Qwen/Qwen2.5-72B-Instruct',
+                    'meta-llama/Llama-3.1-70B-Instruct',
+                    'mistralai/Mixtral-8x7B-Instruct-v0.1',
+                    # Mid-size Models
+                    'meta-llama/Llama-3.1-8B-Instruct',
+                    'google/gemma-2-9b-it',
+                    'mistralai/Mistral-7B-v0.1',
+                    'meta-llama/Llama-2-7b-chat-hf',
+                    # Smaller Models
+                    'meta-llama/Llama-3.2-3B-Instruct',
+                    'meta-llama/Llama-3.2-1B-Instruct',
+                    'Qwen/Qwen2.5-1.5B-Instruct',
+                    'microsoft/Phi-3.5-mini-instruct',
+                    'HuggingFaceTB/SmolLM2-1.7B-Instruct',
+                    'google/gemma-2-2b-it',
+                    # Base Models
+                    'meta-llama/Llama-3.2-3B',
+                    'meta-llama/Llama-3.2-1B',
+                    'openai-community/gpt2'
+                ],
+                value='HuggingFaceTB/SmolLM2-1.7B-Instruct', # Default to a powerful model
+                label="Select Hugging Face Model",
+                interactive=True
+            )
+
+        hf_interface = gr.load(
+            name=hf_model.value,
+            src="models", # Use direct model loading from HF
+            fill_height=True
+        )
+
+        def update_hf_model(new_model):
+            return gr.load(
+                name=new_model,
+                src="models",
+                fill_height=True
+            )
+
+        hf_model.change(
+            fn=update_hf_model,
+            inputs=[hf_model],
+            outputs=[hf_interface]
+        )
+
+        gr.Markdown("""
+        **Note:** These models are loaded directly from Hugging Face Hub. Some models may require authentication.
+
+        Models are organized by size:
+        - **Large Models**: 32B-72B parameters
+        - **Mid-size Models**: 7B-9B parameters
+        - **Smaller Models**: 1B-3B parameters
+        - **Base Models**: Original architectures
+
+        Visit [Hugging Face](https://huggingface.co/) to learn more about available models.
+        """)
     with gr.Tab("Groq"):
         with gr.Row():
             groq_model = gr.Dropdown(
@@ -680,67 +741,6 @@ with gr.Blocks(fill_height=True) as demo:
         - **Microsoft**: Phi-3 series
        - And other providers including Qwen, Databricks, DeepSeek, etc.
         """)
-    with gr.Tab("Hugging Face"):
-        with gr.Row():
-            hf_model = gr.Dropdown(
-                choices=[
-                    # Latest Large Models
-                    'Qwen/Qwen2.5-Coder-32B-Instruct',
-                    'Qwen/Qwen2.5-72B-Instruct',
-                    'meta-llama/Llama-3.1-70B-Instruct',
-                    'mistralai/Mixtral-8x7B-Instruct-v0.1',
-                    # Mid-size Models
-                    'meta-llama/Llama-3.1-8B-Instruct',
-                    'google/gemma-2-9b-it',
-                    'mistralai/Mistral-7B-v0.1',
-                    'meta-llama/Llama-2-7b-chat-hf',
-                    # Smaller Models
-                    'meta-llama/Llama-3.2-3B-Instruct',
-                    'meta-llama/Llama-3.2-1B-Instruct',
-                    'Qwen/Qwen2.5-1.5B-Instruct',
-                    'microsoft/Phi-3.5-mini-instruct',
-                    'HuggingFaceTB/SmolLM2-1.7B-Instruct',
-                    'google/gemma-2-2b-it',
-                    # Base Models
-                    'meta-llama/Llama-3.2-3B',
-                    'meta-llama/Llama-3.2-1B',
-                    'openai-community/gpt2'
-                ],
-                value='HuggingFaceTB/SmolLM2-1.7B-Instruct', # Default to a powerful model
-                label="Select Hugging Face Model",
-                interactive=True
-            )
-
-        hf_interface = gr.load(
-            name=hf_model.value,
-            src="models", # Use direct model loading from HF
-            fill_height=True
-        )
-
-        def update_hf_model(new_model):
-            return gr.load(
-                name=new_model,
-                src="models",
-                fill_height=True
-            )
-
-        hf_model.change(
-            fn=update_hf_model,
-            inputs=[hf_model],
-            outputs=[hf_interface]
-        )
-
-        gr.Markdown("""
-        **Note:** These models are loaded directly from Hugging Face Hub. Some models may require authentication.
-
-        Models are organized by size:
-        - **Large Models**: 32B-72B parameters
-        - **Mid-size Models**: 7B-9B parameters
-        - **Smaller Models**: 1B-3B parameters
-        - **Base Models**: Original architectures
-
-        Visit [Hugging Face](https://huggingface.co/) to learn more about available models.
-        """)
 
 demo.launch(ssr_mode=False)
 
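
The change is a straight relocation: the block added at new lines 202-262 is the same block removed at old lines 683-743, so the Hugging Face tab now renders ahead of the Groq tab instead of last. For readers skimming the diff, the relocated tab reduces to the sketch below; the model ids come from the diff, only two of them are kept, and the dropdown-driven reload (returning a freshly loaded interface from the change handler) follows the app's own approach rather than a separately verified Gradio idiom.

import gradio as gr

# Minimal sketch of the relocated tab, assuming a recent Gradio release.
with gr.Blocks(fill_height=True) as demo:
    with gr.Tab("Hugging Face"):
        with gr.Row():
            hf_model = gr.Dropdown(
                choices=[
                    "HuggingFaceTB/SmolLM2-1.7B-Instruct",
                    "Qwen/Qwen2.5-72B-Instruct",
                ],
                value="HuggingFaceTB/SmolLM2-1.7B-Instruct",
                label="Select Hugging Face Model",
                interactive=True,
            )

        # Load an inference UI for the default model straight from the Hub.
        hf_interface = gr.load(name=hf_model.value, src="models")

        def update_hf_model(new_model):
            # Mirrors the app: hand back a freshly loaded interface for the
            # newly selected model as the update for hf_interface.
            return gr.load(name=new_model, src="models")

        hf_model.change(
            fn=update_hf_model,
            inputs=[hf_model],
            outputs=[hf_interface],
        )

demo.launch()

Gated repositories (several of the meta-llama ids above) also require Hub authentication, which is what the in-app note about some models needing authentication refers to.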