add nvidia
- app.py +84 -0
- requirements.txt +2 -1
app.py
CHANGED
@@ -11,6 +11,7 @@ import fireworks_gradio
 import cerebras_gradio
 import groq_gradio
 import together_gradio
+import nvidia_gradio



@@ -523,6 +524,89 @@ with gr.Blocks(fill_height=True) as demo:
         gr.Markdown("""
         **Note:** You need a Together AI API key to use these models. Get one at [Together AI](https://www.together.ai/).
         """)
+    with gr.Tab("NVIDIA"):
+        with gr.Row():
+            nvidia_model = gr.Dropdown(
+                choices=[
+                    # NVIDIA Models
+                    'nvidia/llama3-chatqa-1.5-70b',
+                    'nvidia/llama3-chatqa-1.5-8b',
+                    'nvidia/nemotron-4-340b-instruct',
+                    # Meta Models
+                    'meta/codellama-70b',
+                    'meta/llama2-70b',
+                    'meta/llama3-8b',
+                    'meta/llama3-70b',
+                    # Mistral Models
+                    'mistralai/codestral-22b-instruct-v0.1',
+                    'mistralai/mathstral-7b-v0.1',
+                    'mistralai/mistral-large-2-instruct',
+                    'mistralai/mistral-7b-instruct',
+                    'mistralai/mistral-7b-instruct-v0.3',
+                    'mistralai/mixtral-8x7b-instruct',
+                    'mistralai/mixtral-8x22b-instruct',
+                    'mistralai/mistral-large',
+                    # Google Models
+                    'google/gemma-2b',
+                    'google/gemma-7b',
+                    'google/gemma-2-2b-it',
+                    'google/gemma-2-9b-it',
+                    'google/gemma-2-27b-it',
+                    'google/codegemma-1.1-7b',
+                    'google/codegemma-7b',
+                    'google/recurrentgemma-2b',
+                    'google/shieldgemma-9b',
+                    # Microsoft Phi-3 Models
+                    'microsoft/phi-3-medium-128k-instruct',
+                    'microsoft/phi-3-medium-4k-instruct',
+                    'microsoft/phi-3-mini-128k-instruct',
+                    'microsoft/phi-3-mini-4k-instruct',
+                    'microsoft/phi-3-small-128k-instruct',
+                    'microsoft/phi-3-small-8k-instruct',
+                    # Other Models
+                    'qwen/qwen2-7b-instruct',
+                    'databricks/dbrx-instruct',
+                    'deepseek-ai/deepseek-coder-6.7b-instruct',
+                    'upstage/solar-10.7b-instruct',
+                    'snowflake/arctic'
+                ],
+                value='nvidia/llama3-chatqa-1.5-70b',  # Default to NVIDIA's flagship model
+                label="Select NVIDIA Model",
+                interactive=True
+            )
+
+        nvidia_interface = gr.load(
+            name=nvidia_model.value,
+            src=nvidia_gradio.registry,
+            accept_token=True,  # show a token field so the user can supply an API key
+            fill_height=True
+        )
+
+        def update_nvidia_model(new_model):
+            return gr.load(
+                name=new_model,
+                src=nvidia_gradio.registry,
+                accept_token=True,  # show a token field so the user can supply an API key
+                fill_height=True
+            )
+
+        nvidia_model.change(
+            fn=update_nvidia_model,
+            inputs=[nvidia_model],
+            outputs=[nvidia_interface]
+        )
+
+        gr.Markdown("""
+        **Note:** You need an NVIDIA AI Foundation API key to use these models. Get one at [NVIDIA AI Foundation](https://www.nvidia.com/en-us/ai-data-science/foundation-models/).
+
+        Models are organized by provider:
+        - **NVIDIA**: Native models, including Llama3-ChatQA and Nemotron
+        - **Meta**: Llama family models
+        - **Mistral**: Mistral and Mixtral models
+        - **Google**: Gemma family models
+        - **Microsoft**: Phi-3 series
+        - And other providers, including Qwen, Databricks, DeepSeek, etc.
+        """)

 demo.launch(ssr_mode=False)

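For reference, the registry pattern used above can also drive a single-model app on its own. The snippet below is only a minimal sketch of that usage, not part of the commit: it assumes nvidia_gradio exposes a registry object accepted by gr.load exactly as in the diff, and it reuses one of the model ids from the dropdown.

# Minimal standalone sketch (assumption: nvidia_gradio.registry behaves as used in the diff above).
import gradio as gr
import nvidia_gradio

demo = gr.load(
    name='nvidia/llama3-chatqa-1.5-70b',  # any id from the dropdown above
    src=nvidia_gradio.registry,
    accept_token=True,  # lets the user paste an NVIDIA API key in the UI
)

if __name__ == "__main__":
    demo.launch()

The Dropdown.change handler that returns a fresh gr.load(...) is the commit's way of swapping between all of these models inside a single tab without restarting the Space.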
requirements.txt
CHANGED
@@ -9,4 +9,5 @@ mistral-gradio
 git+https://github.com/AK391/fireworks-ai-gradio.git
 git+https://github.com/gradio-app/cerebras_gradio.git
 groq-gradio
-together-gradio
+together-gradio
+git+https://github.com/AK391/nvidia-gradio.git
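To try the updated Space locally, the new dependency can be installed straight from the pinned Git source, e.g. with pip install git+https://github.com/AK391/nvidia-gradio.git (or simply pip install -r requirements.txt), before launching app.py as before; this assumes a standard pip and Git setup rather than the Hugging Face build environment.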