BETTERVIEW #14
by jersuxs - opened

This view is limited to 50 files because it contains too many changes. See the raw diff here.
- .gitignore +52 -129
- .pre-commit-config.yaml +37 -5
- .vscode/extensions.json +0 -8
- .vscode/settings.json +17 -3
- README.md +2 -2
- app.py +67 -67
- app_allenai.py +26 -20
- app_cohere.py +0 -21
- app_compare.py +0 -210
- app_crew.py +0 -8
- app_deepseek.py +0 -23
- app_experimental.py +12 -15
- app_fireworks.py +1 -2
- app_replicate.py → app_flux.py +1 -2
- app_gemini.py +13 -14
- app_gemini_camera.py +0 -23
- app_gemini_coder.py +0 -23
- app_gemini_voice.py +0 -23
- app_groq.py +17 -13
- app_groq_coder.py +0 -23
- app_huggingface.py +39 -31
- app_hyperbolic.py +21 -14
- app_hyperbolic_coder.py +0 -20
- app_langchain.py +0 -23
- app_fal.py → app_ltx_video.py +1 -2
- app_lumaai.py +0 -7
- app_meta.py +0 -6
- app_mindsearch.py +0 -12
- app_minimax.py +0 -22
- app_minimax_coder.py +0 -23
- app_mistral.py +19 -15
- app_moondream.py +0 -13
- app_nvidia.py +44 -14
- app_omini.py +0 -10
- app_openai.py +26 -13
- app_openai_coder.py +0 -22
- app_openai_voice.py +0 -23
- app_paligemma.py +0 -78
- app_playai.py +3 -6
- app_qwen.py +1 -2
- app_sailor.py +0 -9
- app_sambanova.py +1 -5
- app_showui.py +0 -10
- app_smolagents.py +0 -19
- app_together.py +1 -2
- app_transformers.py +0 -11
- app_trellis.py +0 -10
- app_xai.py +1 -3
- pre-requirements.txt +1 -0
- pyproject.toml +55 -37
.gitignore
CHANGED
@@ -1,14 +1,8 @@
-
-
-# Byte-compiled / optimized / DLL files
+# Python
 __pycache__/
 *.py[cod]
 *$py.class
-
-# C extensions
 *.so
-
-# Distribution / packaging
 .Python
 build/
 develop-eggs/
@@ -22,141 +16,70 @@ parts/
 sdist/
 var/
 wheels/
-share/python-wheels/
 *.egg-info/
 .installed.cfg
 *.egg
-MANIFEST
 
-#
+# Virtual Environment
+venv/
+env/
+ENV/
+.env
+.venv
+env.bak/
+venv.bak/
+
+# IDE specific files
+.idea/
+.vscode/
+*.swp
+*.swo
+.DS_Store
+.vs/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# Gradio
+flagged/
+gradio_cached_examples/
+
+# Logs
+*.log
+logs/
+*.out
 
-#
+# Local development settings
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
 
-#
+# Coverage reports
 htmlcov/
 .tox/
-.nox/
 .coverage
 .coverage.*
 .cache
 nosetests.xml
 coverage.xml
 *.cover
-*.py,cover
 .hypothesis/
-.pytest_cache/
-cover/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-# For a library or package, you might want to ignore these files since the code is
-# intended to run in multiple environments; otherwise, check them in:
-# .python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# poetry
-# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
-# This is especially recommended for binary packages to ensure reproducibility, and is more
-# commonly ignored for libraries.
-# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-#poetry.lock
-
-# pdm
-# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
-#pdm.lock
-# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
-# in version control.
-# https://pdm.fming.dev/#use-with-ide
-.pdm.toml
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
-__pypackages__/
 
-#
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-# PyCharm
-# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
-# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-# and can be added to the global gitignore or merged into this file. For a more nuclear
-# option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
.pre-commit-config.yaml
CHANGED
@@ -13,13 +13,18 @@ repos:
       args: ["--fix=lf"]
     - id: requirements-txt-fixer
     - id: trailing-whitespace
-  - repo: https://github.com/
-    rev:
+  - repo: https://github.com/myint/docformatter
+    rev: v1.7.5
     hooks:
-      - id:
-        args: ["--
+      - id: docformatter
+        args: ["--in-place"]
+  - repo: https://github.com/pycqa/isort
+    rev: 5.13.2
+    hooks:
+      - id: isort
+        args: ["--profile", "black"]
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.
+    rev: v1.13.0
     hooks:
       - id: mypy
         args: ["--ignore-missing-imports"]
@@ -30,3 +35,30 @@ repos:
           "types-PyYAML",
           "types-pytz",
         ]
+  - repo: https://github.com/psf/black
+    rev: 24.10.0
+    hooks:
+      - id: black
+        language_version: python3.10
+        args: ["--line-length", "119"]
+  - repo: https://github.com/charliermarsh/ruff-pre-commit
+    rev: v0.7.4
+    hooks:
+      - id: ruff
+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.8.1
+    hooks:
+      - id: nbstripout
+        args:
+          [
+            "--extra-keys",
+            "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
+          ]
+  - repo: https://github.com/nbQA-dev/nbQA
+    rev: 1.9.1
+    hooks:
+      - id: nbqa-black
+      - id: nbqa-pyupgrade
+        args: ["--py37-plus"]
+      - id: nbqa-isort
+        args: ["--float-to-top"]
.vscode/extensions.json
DELETED
@@ -1,8 +0,0 @@
-{
-  "recommendations": [
-    "ms-python.python",
-    "charliermarsh.ruff",
-    "streetsidesoftware.code-spell-checker",
-    "tamasfe.even-better-toml"
-  ]
-}
.vscode/settings.json
CHANGED
@@ -2,15 +2,29 @@
   "editor.formatOnSave": true,
   "files.insertFinalNewline": false,
   "[python]": {
-    "editor.defaultFormatter": "
+    "editor.defaultFormatter": "ms-python.black-formatter",
     "editor.formatOnType": true,
     "editor.codeActionsOnSave": {
-      "source.
+      "source.organizeImports": "explicit"
     }
   },
   "[jupyter]": {
     "files.insertFinalNewline": false
   },
+  "black-formatter.args": [
+    "--line-length=119"
+  ],
+  "isort.args": ["--profile", "black"],
+  "flake8.args": [
+    "--max-line-length=119"
+  ],
+  "ruff.lint.args": [
+    "--line-length=119"
+  ],
   "notebook.output.scrolling": true,
-  "notebook.
+  "notebook.formatOnCellExecution": true,
+  "notebook.formatOnSave.enabled": true,
+  "notebook.codeActionsOnSave": {
+    "source.organizeImports": "explicit"
+  }
 }
README.md
CHANGED
@@ -4,10 +4,10 @@ emoji: 🏢
 colorFrom: indigo
 colorTo: indigo
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.7.0
 app_file: app.py
 pinned: false
 disable_embedding: true
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
CHANGED
@@ -1,77 +1,77 @@
-
+import gradio as gr
+
+from app_playai import demo as demo_playai
+from app_allenai import demo as demo_allenai
 from app_claude import demo as demo_claude
-from
-from app_fal import demo as demo_fal
+from app_experimental import demo as demo_experimental
 from app_fireworks import demo as demo_fireworks
-from
-from app_perplexity import demo as demo_perplexity
-from app_playai import demo as demo_playai
-from app_replicate import demo as demo_replicate
-from app_sambanova import demo as demo_sambanova
-from app_together import demo as demo_together
-from app_xai import demo as demo_grok
-from app_qwen import demo as demo_qwen
-from app_crew import demo as demo_crew
-from app_hyperbolic import demo as demo_hyperbolic
-from app_openai import demo as demo_openai
-from app_gemini_camera import demo as demo_gemini_camera
-from app_gemini_coder import demo as demo_gemini_coder
+from app_flux import demo as demo_flux
 from app_gemini import demo as demo_gemini
-from app_gemini_voice import demo as demo_gemini_voice
-from app_hyperbolic_coder import demo as demo_hyperbolic_coder
-from app_smolagents import demo as demo_smolagents
 from app_groq import demo as demo_groq
-from
-from
-from
+from app_hyperbolic import demo as demo_hyperbolic
+from app_ltx_video import demo as demo_ltx_video
+from app_marco_o1 import demo as demo_marco_o1
 from app_mistral import demo as demo_mistral
-from app_minimax import demo as demo_minimax
-from app_minimax_coder import demo as demo_minimax_coder
 from app_nvidia import demo as demo_nvidia
-from
-from
+from app_openai import demo as demo_openai
+from app_perplexity import demo as demo_perplexity
+from app_qwen import demo as demo_qwen
+from app_sambanova import demo as demo_sambanova
+from app_together import demo as demo_together
+from app_xai import demo as demo_grok
 
-    "
-    "
-    "
-    "
-    "
-    "Groq":
-    "
-    "
-    "
-
-demo = get_app(
-    models=list(PROVIDERS.keys()),
-    default_model="Gemini Coder",
-    src=PROVIDERS,
-    dropdown_label="Select Provider",
-)
+with gr.Blocks(fill_height=True) as demo:
+    with gr.Tab("PlayAI"):
+        demo_playai.render()
+    with gr.Tab("Grok"):
+        demo_grok.render()
+    with gr.Tab("Hyperbolic"):
+        demo_hyperbolic.render()
+        gr.Markdown(
+            """
+            <div>
+                <img src="https://storage.googleapis.com/public-arena-asset/hyperbolic_logo.png" alt="Hyperbolic Logo" style="height: 50px; margin-right: 10px;">
+            </div>
+
+            **Note:** This model is supported by Hyperbolic. Build your AI apps at [Hyperbolic](https://app.hyperbolic.xyz/).
+            """
+        )
+    with gr.Tab("Gemini"):
+        demo_gemini.render()
+    with gr.Tab("ChatGPT"):
+        demo_openai.render()
+    with gr.Tab("Claude"):
+        demo_claude.render()
+    with gr.Tab("Qwen"):
+        demo_qwen.render()
+    with gr.Tab("Allen AI"):
+        demo_allenai.render()
+    with gr.Tab("Perplexity"):
+        demo_perplexity.render()
+    with gr.Tab("Experimental"):
+        demo_experimental.render()
+    with gr.Tab("Meta Llama"):
+        demo_sambanova.render()
+        gr.Markdown(
+            "**Note:** You need to use a SambaNova API key from [SambaNova Cloud](https://cloud.sambanova.ai/)."
+        )
+    with gr.Tab("Marco-o1"):
+        demo_marco_o1.render()
+    with gr.Tab("LTX Video"):
+        demo_ltx_video.render()
+    with gr.Tab("Groq"):
+        demo_groq.render()
+    with gr.Tab("Mistral"):
+        demo_mistral.render()
+    with gr.Tab("Fireworks"):
+        demo_fireworks.render()
+    with gr.Tab("Together"):
+        demo_together.render()
+    with gr.Tab("NVIDIA"):
+        demo_nvidia.render()
+    with gr.Tab("Flux"):
+        demo_flux.render()
 
 if __name__ == "__main__":
-    demo.queue(api_open=False).launch(show_api=False)
+    demo.queue(api_open=False).launch(ssr_mode=False, show_api=False)
app_allenai.py
CHANGED
@@ -1,8 +1,10 @@
-import gradio as gr
 from gradio_client import Client
+import gradio as gr
 
-MODELS = {
-
+MODELS = {
+    "OLMo-2-1124-13B-Instruct": "akhaliq/olmo-anychat",
+    "Llama-3.1-Tulu-3-8B": "akhaliq/allen-test"
+}
 
 def create_chat_fn(client):
     def chat(message, history):
@@ -14,48 +16,51 @@ def create_chat_fn(client):
             top_k=40,
             repetition_penalty=1.1,
             top_p=0.95,
-            api_name="/chat"
+            api_name="/chat"
         )
         return response
-
     return chat
 
-
 def set_client_for_session(model_name, request: gr.Request):
     headers = {}
-    if request and hasattr(request,
-        x_ip_token = request.request.headers.get(
+    if request and hasattr(request, 'request') and hasattr(request.request, 'headers'):
+        x_ip_token = request.request.headers.get('x-ip-token')
         if x_ip_token:
             headers["X-IP-Token"] = x_ip_token
-
+
     return Client(MODELS[model_name], headers=headers)
 
-
 def safe_chat_fn(message, history, client):
     if client is None:
         return "Error: Client not initialized. Please refresh the page."
     return create_chat_fn(client)(message, history)
 
-
 with gr.Blocks() as demo:
+
     client = gr.State()
-
+
     model_dropdown = gr.Dropdown(
-        choices=list(MODELS.keys()),
+        choices=list(MODELS.keys()),
+        value="OLMo-2-1124-13B-Instruct",
+        label="Select Model",
+        interactive=True
     )
-
-    chat_interface = gr.ChatInterface(
-
+
+    chat_interface = gr.ChatInterface(
+        fn=safe_chat_fn,
+        additional_inputs=[client]
+    )
+
     # Update client when model changes
     def update_model(model_name, request):
         return set_client_for_session(model_name, request)
-
+
     model_dropdown.change(
         fn=update_model,
         inputs=[model_dropdown],
         outputs=[client],
     )
-
+
     # Initialize client on page load
     demo.load(
         fn=set_client_for_session,
@@ -63,5 +68,6 @@ with gr.Blocks() as demo:
         outputs=client,
     )
 
-
-
+    demo = demo
+
+
app_cohere.py
DELETED
@@ -1,21 +0,0 @@
-import os
-
-import cohere_gradio
-
-from utils import get_app
-
-demo = get_app(
-    models=[
-        "command-r",
-        "command-r-08-2024",
-        "command-r-plus",
-        "command-r-plus-08-2024",
-        "command-r7b-12-2024",
-    ],
-    default_model="command-r7b-12-2024",
-    src=cohere_gradio.registry,
-    accept_token=not os.getenv("COHERE_API_KEY"),
-)
-
-if __name__ == "__main__":
-    demo.launch()
app_compare.py
DELETED
@@ -1,210 +0,0 @@
-import os
-
-import google.generativeai as genai
-import gradio as gr
-import openai
-from anthropic import Anthropic
-from openai import OpenAI  # Add explicit OpenAI import
-
-
-def get_all_models():
-    """Get all available models from the registries."""
-    return [
-        "SambaNova: Meta-Llama-3.2-1B-Instruct",
-        "SambaNova: Meta-Llama-3.2-3B-Instruct",
-        "SambaNova: Llama-3.2-11B-Vision-Instruct",
-        "SambaNova: Llama-3.2-90B-Vision-Instruct",
-        "SambaNova: Meta-Llama-3.1-8B-Instruct",
-        "SambaNova: Meta-Llama-3.1-70B-Instruct",
-        "SambaNova: Meta-Llama-3.1-405B-Instruct",
-        "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
-        "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
-        "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
-        "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
-        "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
-        "Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
-        "Hyperbolic: deepseek-ai/DeepSeek-V2.5",
-        "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
-    ]
-
-
-def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
-    """Generate a prompt for models to discuss and build upon previous
-    responses.
-    """
-    prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
-
-Previous responses from other AI models:
-{chr(10).join(f"- {response}" for response in previous_responses)}
-
-Please provide your perspective while:
-1. Acknowledging key insights from previous responses
-2. Adding any missing important points
-3. Respectfully noting if you disagree with anything and explaining why
-4. Building towards a complete answer
-
-Keep your response focused and concise (max 3-4 paragraphs)."""
-    return prompt
-
-
-def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
-    """Generate a prompt for final consensus building."""
-    return f"""Review this multi-AI discussion about: "{original_question}"
-
-Discussion history:
-{chr(10).join(discussion_history)}
-
-As a final synthesizer, please:
-1. Identify the key points where all models agreed
-2. Explain how any disagreements were resolved
-3. Present a clear, unified answer that represents our collective best understanding
-4. Note any remaining uncertainties or caveats
-
-Keep the final consensus concise but complete."""
-
-
-def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
-    import openai
-
-    client = openai.OpenAI(api_key=api_key)
-    response = client.chat.completions.create(model=model, messages=messages)
-    return response.choices[0].message.content
-
-
-def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
-    """Chat with Anthropic's Claude model."""
-    client = Anthropic(api_key=api_key)
-    response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
-    return response.content[0].text
-
-
-def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
-    """Chat with Gemini Pro model."""
-    genai.configure(api_key=api_key)
-    model = genai.GenerativeModel("gemini-pro")
-
-    # Convert messages to Gemini format
-    gemini_messages = []
-    for msg in messages:
-        role = "user" if msg["role"] == "user" else "model"
-        gemini_messages.append({"role": role, "parts": [msg["content"]]})
-
-    response = model.generate_content([m["parts"][0] for m in gemini_messages])
-    return response.text
-
-
-def chat_with_sambanova(
-    messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
-) -> str:
-    """Chat with SambaNova's models using their OpenAI-compatible API."""
-    client = openai.OpenAI(
-        api_key=api_key,
-        base_url="https://api.sambanova.ai/v1",
-    )
-
-    response = client.chat.completions.create(
-        model=model_name,
-        messages=messages,
-        temperature=0.1,
-        top_p=0.1,  # Use the specific model name passed in
-    )
-    return response.choices[0].message.content
-
-
-def chat_with_hyperbolic(
-    messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
-) -> str:
-    """Chat with Hyperbolic's models using their OpenAI-compatible API."""
-    client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
-
-    # Add system message to the start of the messages list
-    full_messages = [
-        {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
-        *messages,
-    ]
-
-    response = client.chat.completions.create(
-        model=model_name,  # Use the specific model name passed in
-        messages=full_messages,
-        temperature=0.7,
-        max_tokens=1024,
-    )
-    return response.choices[0].message.content
-
-
-def multi_model_consensus(
-    question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
-) -> list[tuple[str, str]]:
-    if not selected_models:
-        raise gr.Error("Please select at least one model to chat with.")
-
-    chat_history = []
-    progress(0, desc="Getting responses from all models...")
-
-    # Get responses from all models in parallel
-    for i, model in enumerate(selected_models):
-        provider, model_name = model.split(": ", 1)
-        progress((i + 1) / len(selected_models), desc=f"Getting response from {model}...")
-
-        try:
-            if provider == "Anthropic":
-                api_key = os.getenv("ANTHROPIC_API_KEY")
-                response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
-            elif provider == "SambaNova":
-                api_key = os.getenv("SAMBANOVA_API_KEY")
-                response = chat_with_sambanova(
-                    messages=[
-                        {"role": "system", "content": "You are a helpful assistant"},
-                        {"role": "user", "content": question},
-                    ],
-                    api_key=api_key,
-                    model_name=model_name,
-                )
-            elif provider == "Hyperbolic":
-                api_key = os.getenv("HYPERBOLIC_API_KEY")
-                response = chat_with_hyperbolic(
-                    messages=[{"role": "user", "content": question}],
-                    api_key=api_key,
-                    model_name=model_name,
-                )
-            else:  # Gemini
-                api_key = os.getenv("GEMINI_API_KEY")
-                response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)
-
-            chat_history.append((model, response))
-        except Exception as e:
-            chat_history.append((model, f"Error: {e!s}"))
-
-    progress(1.0, desc="Done!")
-    return chat_history
-
-
-with gr.Blocks() as demo:
-    gr.Markdown("# Model Response Comparison")
-    gr.Markdown("""Select multiple models to compare their responses""")
-
-    with gr.Row():
-        with gr.Column():
-            model_selector = gr.Dropdown(
-                choices=get_all_models(),
-                multiselect=True,
-                label="Select Models",
-                info="Choose models to compare",
-                value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
-            )
-
-    chatbot = gr.Chatbot(height=600, label="Model Responses")
-    msg = gr.Textbox(label="Prompt", placeholder="Ask a question to compare model responses...")
-
-    def respond(message, selected_models):
-        chat_history = multi_model_consensus(message, selected_models, rounds=1)
-        return chat_history
-
-    msg.submit(respond, [msg, model_selector], [chatbot])
-
-    for fn in demo.fns.values():
-        fn.api_name = False
-
-if __name__ == "__main__":
-    demo.launch()
app_crew.py
DELETED
@@ -1,8 +0,0 @@
-import ai_gradio
-import gradio as gr
-
-demo = gr.load(
-    name="crewai:gpt-4-turbo",
-    crew_type="article",  # or 'support'
-    src=ai_gradio.registry,
-)
app_deepseek.py
DELETED
@@ -1,23 +0,0 @@
-import ai_gradio
-
-from utils_ai_gradio import get_app
-
-# Get the hyperbolic models but keep their full names for loading
-DEEPSEEK_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("deepseek:")]
-
-# Create display names without the prefix
-DEEPSEEK_MODELS_DISPLAY = [k.replace("deepseek:", "") for k in DEEPSEEK_MODELS_FULL]
-
-
-# Create and launch the interface using get_app utility
-demo = get_app(
-    models=DEEPSEEK_MODELS_FULL,  # Use the full names with prefix
-    default_model=DEEPSEEK_MODELS_FULL[-1],
-    dropdown_label="Select DeepSeek Model",
-    choices=DEEPSEEK_MODELS_DISPLAY,  # Display names without prefix
-    fill_height=True,
-    coder=True,
-)
-
-if __name__ == "__main__":
-    demo.launch()
app_experimental.py
CHANGED
@@ -1,5 +1,6 @@
 import os
 import random
+from typing import Dict, List
 
 import google.generativeai as genai
 import gradio as gr
@@ -30,10 +31,9 @@ def get_all_models():
     ]
 
 
-def generate_discussion_prompt(original_question: str, previous_responses:
+def generate_discussion_prompt(original_question: str, previous_responses: List[str]) -> str:
     """Generate a prompt for models to discuss and build upon previous
-    responses.
-    """
+    responses."""
     prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
 
 Previous responses from other AI models:
@@ -49,7 +49,7 @@ Keep your response focused and concise (max 3-4 paragraphs)."""
     return prompt
 
 
-def generate_consensus_prompt(original_question: str, discussion_history:
+def generate_consensus_prompt(original_question: str, discussion_history: List[str]) -> str:
     """Generate a prompt for final consensus building."""
     return f"""Review this multi-AI discussion about: "{original_question}"
 
@@ -65,7 +65,7 @@ As a final synthesizer, please:
 Keep the final consensus concise but complete."""
 
 
-def chat_with_openai(model: str, messages:
+def chat_with_openai(model: str, messages: List[Dict], api_key: str | None) -> str:
     import openai
 
     client = openai.OpenAI(api_key=api_key)
@@ -73,14 +73,14 @@ def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> s
     return response.choices[0].message.content
 
 
-def chat_with_anthropic(messages:
+def chat_with_anthropic(messages: List[Dict], api_key: str | None) -> str:
     """Chat with Anthropic's Claude model."""
     client = Anthropic(api_key=api_key)
     response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
     return response.content[0].text
 
 
-def chat_with_gemini(messages:
+def chat_with_gemini(messages: List[Dict], api_key: str | None) -> str:
     """Chat with Gemini Pro model."""
     genai.configure(api_key=api_key)
     model = genai.GenerativeModel("gemini-pro")
@@ -96,7 +96,7 @@ def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
 
 
 def chat_with_sambanova(
-    messages:
+    messages: List[Dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
 ) -> str:
     """Chat with SambaNova's models using their OpenAI-compatible API."""
     client = openai.OpenAI(
@@ -105,16 +105,13 @@ def chat_with_sambanova(
     )
 
     response = client.chat.completions.create(
-        model=model_name,
-        messages=messages,
-        temperature=0.1,
-        top_p=0.1,  # Use the specific model name passed in
+        model=model_name, messages=messages, temperature=0.1, top_p=0.1  # Use the specific model name passed in
     )
     return response.choices[0].message.content
 
 
 def chat_with_hyperbolic(
-    messages:
+    messages: List[Dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
 ) -> str:
     """Chat with Hyperbolic's models using their OpenAI-compatible API."""
     client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
@@ -135,7 +132,7 @@ def chat_with_hyperbolic(
 
 
 def multi_model_consensus(
-    question: str, selected_models:
+    question: str, selected_models: List[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
 ) -> list[tuple[str, str]]:
     if not selected_models:
         raise gr.Error("Please select at least one model to chat with.")
@@ -249,7 +246,7 @@ def multi_model_consensus(
             messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
         )
     except Exception as e:
-        final_consensus = f"Error getting consensus from {model}: {e
+        final_consensus = f"Error getting consensus from {model}: {str(e)}"
 
     chat_history.append(("Final Consensus", final_consensus))
 
app_fireworks.py
CHANGED
@@ -8,9 +8,8 @@ demo = get_app(
     models=[
         "f1-preview",
         "f1-mini-preview",
-        "llama-v3p3-70b-instruct",
     ],
-    default_model="
+    default_model="f1-preview",
     src=fireworks_gradio.registry,
     accept_token=not os.getenv("FIREWORKS_API_KEY"),
 )
app_replicate.py → app_flux.py
RENAMED
@@ -8,9 +8,8 @@ demo = get_app(
         "black-forest-labs/flux-canny-pro",
         "black-forest-labs/flux-fill-pro",
         "black-forest-labs/flux-depth-dev",
-        "tencent/hunyuan-video:140176772be3b423d14fdaf5403e6d4e38b85646ccad0c3fd2ed07c211f0cad1",
     ],
-    default_model="
+    default_model="black-forest-labs/flux-depth-pro",
     src=replicate_gradio.registry,
 )
 
app_gemini.py
CHANGED
@@ -1,21 +1,20 @@
-import
+import os
 
+import gemini_gradio
 
+from utils import get_app
 
-GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
-
-# Create display names without the prefix
-GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
-
-# Create and launch the interface using get_app utility
 demo = get_app(
-    models=
+    models=[
+        "gemini-1.5-flash",
+        "gemini-1.5-flash-8b",
+        "gemini-1.5-pro",
+        "gemini-exp-1114",
+        "gemini-exp-1121",
+    ],
+    default_model="gemini-1.5-flash",
+    src=gemini_gradio.registry,
+    accept_token=not os.getenv("GEMINI_API_KEY"),
 )
 
 if __name__ == "__main__":
app_gemini_camera.py
DELETED
@@ -1,23 +0,0 @@
-import ai_gradio
-
-from utils_ai_gradio import get_app
-
-# Get the Gemini models but keep their full names for loading
-GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
-
-# Create display names without the prefix
-GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
-
-# Create and launch the interface using get_app utility
-demo = get_app(
-    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
-    default_model=GEMINI_MODELS_FULL[-2],
-    dropdown_label="Select Gemini Model",
-    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
-    src=ai_gradio.registry,
-    camera=True,
-    fill_height=True,
-)
-
-if __name__ == "__main__":
-    demo.launch()
app_gemini_coder.py
DELETED
@@ -1,23 +0,0 @@
-import ai_gradio
-
-from utils_ai_gradio import get_app
-
-# Get the Gemini models but keep their full names for loading
-GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
-
-# Create display names without the prefix
-GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
-
-# Create and launch the interface using get_app utility
-demo = get_app(
-    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
-    default_model=GEMINI_MODELS_FULL[-1],
-    dropdown_label="Select Gemini Model",
-    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
-    src=ai_gradio.registry,
-    fill_height=True,
-    coder=True,
-)
-
-if __name__ == "__main__":
-    demo.launch()
app_gemini_voice.py
DELETED
@@ -1,23 +0,0 @@
-import ai_gradio
-
-from utils_ai_gradio import get_app
-
-# Get the Gemini models but keep their full names for loading
-GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
-
-# Create display names without the prefix
-GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
-
-# Create and launch the interface using get_app utility
-demo = get_app(
-    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
-    default_model=GEMINI_MODELS_FULL[-2],
-    dropdown_label="Select Gemini Model",
-    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
-    src=ai_gradio.registry,
-    enable_voice=True,
-    fill_height=True,
-)
-
-if __name__ == "__main__":
-    demo.launch()
app_groq.py
CHANGED
@@ -1,20 +1,24 @@
-import
+import os
 
+import groq_gradio
 
+from utils import get_app
 
-GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
-
-# Create display names without the prefix
-GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
 
 demo = get_app(
-    models=
+    models=[
+        "llama3-groq-8b-8192-tool-use-preview",
+        "llama3-groq-70b-8192-tool-use-preview",
+        "llama-3.2-1b-preview",
+        "llama-3.2-3b-preview",
+        "llama-3.2-11b-vision-preview",
+        "llama-3.2-90b-vision-preview",
+        "mixtral-8x7b-32768",
+        "gemma2-9b-it",
+        "gemma-7b-it",
+    ],
+    default_model="llama3-groq-70b-8192-tool-use-preview",
+    src=groq_gradio.registry,
+    accept_token=not os.getenv("GROQ_API_KEY"),
 )
 
 if __name__ == "__main__":
app_groq_coder.py
DELETED
@@ -1,23 +0,0 @@
-import ai_gradio
-
-from utils_ai_gradio import get_app
-
-# Get the Groq models but keep their full names for loading
-GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
-
-# Create display names without the prefix
-GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
-
-# Create and launch the interface using get_app utility
-demo = get_app(
-    models=GROQ_MODELS_FULL,  # Use the full names with prefix
-    default_model=GROQ_MODELS_FULL[-2],
-    dropdown_label="Select Groq Model",
-    choices=GROQ_MODELS_DISPLAY,  # Display names without prefix
-    fill_height=True,
-    coder=True,
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
app_huggingface.py
CHANGED
@@ -1,67 +1,75 @@
+from gradio_client import Client
 import gradio as gr
-
+import os
 
-MODELS = {"SmolVLM-Instruct": "akhaliq/SmolVLM-Instruct"}
 
+MODELS = {
+    "SmolVLM-Instruct": "akhaliq/SmolVLM-Instruct"
+}
 
 def create_chat_fn(client):
-    def chat(message, history):
-        # Extract text and files from the message
-        text = message.get("text", "")
-        files = message.get("files", [])
-
-        # Handle file uploads if present
-        processed_files = [handle_file(f) for f in files]
-
+    def chat(message, history, files=[]):
         response = client.predict(
-            message={"text":
+            message={"text": message, "files": files},
             system_prompt="You are a helpful AI assistant.",
             temperature=0.7,
             max_new_tokens=1024,
             top_k=40,
             repetition_penalty=1.1,
             top_p=0.95,
-            api_name="/chat"
+            api_name="/chat"
         )
        return response
-
    return chat
 
-
 def set_client_for_session(model_name, request: gr.Request):
    headers = {}
-    if request and hasattr(request,
-        x_ip_token = request.headers.get(
+    if request and hasattr(request, 'headers'):
+        x_ip_token = request.headers.get('x-ip-token')
        if x_ip_token:
            headers["X-IP-Token"] = x_ip_token
-
+
    return Client(MODELS[model_name], headers=headers)
 
-
 def safe_chat_fn(message, history, client):
    if client is None:
        return "Error: Client not initialized. Please refresh the page."
    try:
        return create_chat_fn(client)(message, history)
    except Exception as e:
-        print(f"Error during chat: {e
-        return f"Error during chat: {e
-
+        print(f"Error during chat: {str(e)}")
+        return f"Error during chat: {str(e)}"
 
 with gr.Blocks() as demo:
+
    client = gr.State()
-
+
    model_dropdown = gr.Dropdown(
-        choices=list(MODELS.keys()),
+        choices=list(MODELS.keys()),
+        value="SmolVLM-Instruct",
+        label="Select Model",
+        interactive=True
    )
-
-    chat_interface = gr.ChatInterface(
-
+
+    chat_interface = gr.ChatInterface(
+        fn=safe_chat_fn,
+        additional_inputs=[client],
+        multimodal=True
+    )
+
    # Update client when model changes
-    model_dropdown.change(
-
+    model_dropdown.change(
+        fn=set_client_for_session,
+        inputs=[model_dropdown],
+        outputs=[client]
+    )
+
    # Initialize client on page load
-    demo.load(
+    demo.load(
+        fn=set_client_for_session,
+        inputs=[gr.State("SmolVLM-Instruct")],
+        outputs=[client]
+    )
+
+    demo = demo
 
-if __name__ == "__main__":
-    demo.launch()
app_hyperbolic.py
CHANGED
@@ -1,19 +1,26 @@
-import
+import os
 
+import hyperbolic_gradio
 
+from utils import get_app
 
-HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
 
-# Create display names without the prefix
-HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
-
-
-# Create and launch the interface using get_app utility
 demo = get_app(
-    models=
+    models=[
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "meta-llama/Llama-3.2-3B-Instruct",
+        "meta-llama/Meta-Llama-3.1-8B-Instruct",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct",
+        "meta-llama/Meta-Llama-3-70B-Instruct",
+        "NousResearch/Hermes-3-Llama-3.1-70B",
+        "Qwen/Qwen2.5-72B-Instruct",
+        "deepseek-ai/DeepSeek-V2.5",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct",
+        "Qwen/QwQ-32B-Preview",
+    ],
+    default_model="Qwen/QwQ-32B-Preview",
+    src=hyperbolic_gradio.registry,
+    accept_token=not os.getenv("HYPERBOLIC_API_KEY"),
 )
+
+if __name__ == "__main__":
+    demo.launch()
app_hyperbolic_coder.py
DELETED
@@ -1,20 +0,0 @@
-import ai_gradio
-
-from utils_ai_gradio import get_app
-
-# Get the hyperbolic models but keep their full names for loading
-HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
-
-# Create display names without the prefix
-HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
-
-
-# Create and launch the interface using get_app utility
-demo = get_app(
-    models=HYPERBOLIC_MODELS_FULL,  # Use the full names with prefix
-    default_model=HYPERBOLIC_MODELS_FULL[-1],
-    dropdown_label="Select Hyperbolic Model",
-    choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
-    fill_height=True,
-    coder=True,
-)
app_langchain.py
DELETED
@@ -1,23 +0,0 @@
-import ai_gradio
-
-from utils_ai_gradio import get_app
-
-# Get the hyperbolic models but keep their full names for loading
-LANGCHAIN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("langchain:")]
-
-# Create display names without the prefix
-LANGCHAIN_MODELS_DISPLAY = [k.replace("langchain:", "") for k in LANGCHAIN_MODELS_FULL]
-
-
-# Create and launch the interface using get_app utility
-demo = get_app(
-    models=LANGCHAIN_MODELS_FULL,  # Use the full names with prefix
-    default_model=LANGCHAIN_MODELS_FULL[0],
-    dropdown_label="Select Langchain Model",
-    choices=LANGCHAIN_MODELS_DISPLAY,  # Display names without prefix
-    fill_height=True,
-)
-
-if __name__ == "__main__":
-    demo.launch()
-
app_fal.py → app_ltx_video.py
RENAMED
@@ -6,9 +6,8 @@ demo = get_app(
     models=[
         "fal-ai/ltx-video",
         "fal-ai/ltx-video/image-to-video",
-        "fal-ai/luma-photon",
     ],
-    default_model="fal-ai/
+    default_model="fal-ai/ltx-video/image-to-video",
     src=fal_gradio.registry,
 )
 
app_lumaai.py
DELETED
@@ -1,7 +0,0 @@
-import gradio as gr
-import lumaai_gradio
-
-demo = gr.load(
-    name="dream-machine",
-    src=lumaai_gradio.registry,
-)
app_meta.py
DELETED
@@ -1,6 +0,0 @@
-import gradio as gr
-
-demo = gr.load("models/meta-llama/Llama-3.3-70B-Instruct")
-
-if __name__ == "__main__":
-    demo.launch()
app_mindsearch.py
DELETED
@@ -1,12 +0,0 @@
-import gradio as gr
-
-# Load the Gradio space
-demo = gr.load(name="internlm/MindSearch", src="spaces")
-
-# Disable API access for all functions
-if hasattr(demo, "fns"):
-    for fn in demo.fns.values():
-        fn.api_name = False
-
-if __name__ == "__main__":
-    demo.launch()
app_minimax.py
DELETED
@@ -1,22 +0,0 @@
-import ai_gradio
-
-from utils_ai_gradio import get_app
-
-# Get the hyperbolic models but keep their full names for loading
-MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]
-
-# Create display names without the prefix
-MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]
-
-
-# Create and launch the interface using get_app utility
-demo = get_app(
-    models=MINIMAX_MODELS_FULL,  # Use the full names with prefix
-    default_model=MINIMAX_MODELS_FULL[0],
-    dropdown_label="Select Minimax Model",
-    choices=MINIMAX_MODELS_DISPLAY,  # Display names without prefix
-    fill_height=True,
-)
-
-if __name__ == "__main__":
-    demo.launch()
app_minimax_coder.py
DELETED
@@ -1,23 +0,0 @@
-import ai_gradio
-
-from utils_ai_gradio import get_app
-
-# Get the hyperbolic models but keep their full names for loading
-MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]
-
-# Create display names without the prefix
-MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]
-
-
-# Create and launch the interface using get_app utility
-demo = get_app(
-    models=MINIMAX_MODELS_FULL,  # Use the full names with prefix
-    default_model=MINIMAX_MODELS_FULL[0],
-    dropdown_label="Select Minimax Model",
-    choices=MINIMAX_MODELS_DISPLAY,  # Display names without prefix
-    fill_height=True,
-    coder=True
-)
-
-if __name__ == "__main__":
-    demo.launch()
app_mistral.py
CHANGED
@@ -1,22 +1,26 @@
-import
+import os
 
-
+import mistral_gradio
 
-
-MISTRAL_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("mistral:")]
+from utils import get_app
 
-# Create display names without the prefix
-MISTRAL_MODELS_DISPLAY = [k.replace("mistral:", "") for k in MISTRAL_MODELS_FULL]
-
-
-# Create and launch the interface using get_app utility
 demo = get_app(
-    models=
-
-
-
-
-
+    models=[
+        "mistral-large-latest",
+        "pixtral-large-latest",
+        "ministral-3b-latest",
+        "ministral-8b-latest",
+        "mistral-small-latest",
+        "codestral-latest",
+        "mistral-embed",
+        "mistral-moderation-latest",
+        "pixtral-12b-2409",
+        "open-mistral-nemo",
+        "open-codestral-mamba",
+    ],
+    default_model="pixtral-large-latest",
+    src=mistral_gradio.registry,
+    accept_token=not os.getenv("MISTRAL_API_KEY"),
 )
 
 if __name__ == "__main__":
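Editorial note: app_mistral.py, app_nvidia.py, app_openai.py, app_qwen.py, app_sambanova.py, app_together.py, and app_xai.py now converge on one call shape: an explicit model list, a default_model, a provider registry passed as src, and accept_token=not os.getenv(...) so a token box appears only when no API key is configured. The utils.get_app helper itself is not part of this diff; the sketch below is a hypothetical reconstruction of the contract those call sites assume, using a tabbed layout for brevity (the real helper may arrange the UI differently):

import gradio as gr


def get_app(models, default_model, src, accept_token=False, **kwargs):
    # Hypothetical sketch, not the repo's actual utils.get_app.
    # Build one sub-app per model through the provider registry `src`,
    # putting the default model first.
    ordered = [default_model] + [m for m in models if m != default_model]
    demos = [
        # Each *_gradio registry is a callable that gr.load() understands;
        # accept_token=True adds a textbox for pasting an API key.
        gr.load(name=m, src=src, accept_token=accept_token, **kwargs)
        for m in ordered
    ]
    return gr.TabbedInterface(demos, tab_names=ordered)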
app_moondream.py
DELETED
@@ -1,13 +0,0 @@
-import gradio as gr
-
-# Load the Gradio space
-demo = gr.load(name="akhaliq/moondream", src="spaces")
-
-
-# Disable API access for all functions
-if hasattr(demo, "fns"):
-    for fn in demo.fns.values():
-        fn.api_name = False
-
-if __name__ == "__main__":
-    demo.launch()
app_nvidia.py
CHANGED
@@ -1,21 +1,51 @@
-import
+import os
 
-
+import nvidia_gradio
 
-
-NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")]
+from utils import get_app
 
-# Create display names without the prefix
-NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL]
-
-
-# Create and launch the interface using get_app utility
 demo = get_app(
-    models=
-
-
-
-
+    models=[
+        "nvidia/llama3-chatqa-1.5-70b",
+        "nvidia/llama3-chatqa-1.5-8b",
+        "nvidia-nemotron-4-340b-instruct",
+        "meta/llama-3.1-70b-instruct",
+        "meta/codellama-70b",
+        "meta/llama2-70b",
+        "meta/llama3-8b",
+        "meta/llama3-70b",
+        "mistralai/codestral-22b-instruct-v0.1",
+        "mistralai/mathstral-7b-v0.1",
+        "mistralai/mistral-large-2-instruct",
+        "mistralai/mistral-7b-instruct",
+        "mistralai/mistral-7b-instruct-v0.3",
+        "mistralai/mixtral-8x7b-instruct",
+        "mistralai/mixtral-8x22b-instruct",
+        "mistralai/mistral-large",
+        "google/gemma-2b",
+        "google/gemma-7b",
+        "google/gemma-2-2b-it",
+        "google/gemma-2-9b-it",
+        "google/gemma-2-27b-it",
+        "google/codegemma-1.1-7b",
+        "google/codegemma-7b",
+        "google/recurrentgemma-2b",
+        "google/shieldgemma-9b",
+        "microsoft/phi-3-medium-128k-instruct",
+        "microsoft/phi-3-medium-4k-instruct",
+        "microsoft/phi-3-mini-128k-instruct",
+        "microsoft/phi-3-mini-4k-instruct",
+        "microsoft/phi-3-small-128k-instruct",
+        "microsoft/phi-3-small-8k-instruct",
+        "qwen/qwen2-7b-instruct",
+        "databricks/dbrx-instruct",
+        "deepseek-ai/deepseek-coder-6.7b-instruct",
+        "upstage/solar-10.7b-instruct",
+        "snowflake/arctic",
+    ],
+    default_model="meta/llama-3.1-70b-instruct",
+    src=nvidia_gradio.registry,
+    accept_token=not os.getenv("NVIDIA_API_KEY"),
 )
 
 if __name__ == "__main__":
app_omini.py
DELETED
@@ -1,10 +0,0 @@
-import gradio as gr
-
-# Load the Gradio space
-demo = gr.load(name="Yuanshi/OminiControl", src="spaces")
-
-
-# Disable API access for all functions
-if hasattr(demo, "fns"):
-    for fn in demo.fns.values():
-        fn.api_name = False
app_openai.py
CHANGED
@@ -1,20 +1,33 @@
-import
+import os
 
-
+import openai_gradio
 
-
-OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]
+from utils import get_app
 
-# Create display names without the prefix
-OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]
-
-# Create and launch the interface using get_app utility
 demo = get_app(
-    models=
-
-
-
-
+    models=[
+        "gpt-4o-2024-11-20",
+        "gpt-4o",
+        "gpt-4o-2024-08-06",
+        "gpt-4o-2024-05-13",
+        "chatgpt-4o-latest",
+        "gpt-4o-mini",
+        "gpt-4o-mini-2024-07-18",
+        "o1-preview",
+        "o1-preview-2024-09-12",
+        "o1-mini",
+        "o1-mini-2024-09-12",
+        "gpt-4-turbo",
+        "gpt-4-turbo-2024-04-09",
+        "gpt-4-turbo-preview",
+        "gpt-4-0125-preview",
+        "gpt-4-1106-preview",
+        "gpt-4",
+        "gpt-4-0613",
+    ],
+    default_model="gpt-4o-2024-11-20",
+    src=openai_gradio.registry,
+    accept_token=not os.getenv("OPENAI_API_KEY"),
 )
 
 if __name__ == "__main__":
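Worth calling out once: accept_token=not os.getenv("OPENAI_API_KEY") means the hosted Space, where the key is stored as a secret, shows no token field, while a local clone without that secret prompts the visitor for a key. In isolation:

import os

# True (show a token textbox) only when the provider key is absent from
# the environment; on the hosted Space the secret is set, so it is hidden.
accept_token = not os.getenv("OPENAI_API_KEY")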
app_openai_coder.py
DELETED
@@ -1,22 +0,0 @@
-import ai_gradio
-
-from utils_ai_gradio import get_app
-
-# Get the OpenAI models but keep their full names for loading
-OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]
-
-# Create display names without the prefix
-OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]
-
-# Create and launch the interface using get_app utility
-demo = get_app(
-    models=OPENAI_MODELS_FULL,  # Use the full names with prefix
-    default_model=OPENAI_MODELS_FULL[5],
-    dropdown_label="Select OpenAI Model",
-    choices=OPENAI_MODELS_DISPLAY,  # Display names without prefix
-    fill_height=True,
-    coder=True,
-)
-
-if __name__ == "__main__":
-    demo.launch()
app_openai_voice.py
DELETED
@@ -1,23 +0,0 @@
-import os
-
-import openai_gradio
-
-from utils import get_app
-
-demo = get_app(
-    models=[
-        "gpt-4o-realtime-preview",
-        "gpt-4o-realtime-preview-2024-12-17",
-        "gpt-4o-realtime-preview-2024-10-01",
-        "gpt-4o-mini-realtime-preview",
-        "gpt-4o-mini-realtime-preview-2024-12-17",
-    ],
-    default_model="gpt-4o-mini-realtime-preview-2024-12-17",
-    src=openai_gradio.registry,
-    accept_token=not os.getenv("OPENAI_API_KEY"),
-    twilio_sid=os.getenv("TWILIO_SID_OPENAI"),
-    twilio_token=os.getenv("TWILIO_AUTH_OPENAI"),
-)
-
-if __name__ == "__main__":
-    demo.launch()
app_paligemma.py
DELETED
@@ -1,78 +0,0 @@
-import gradio as gr
-from gradio_client import Client, handle_file
-
-MODELS = {"Paligemma-10B": "akhaliq/paligemma2-10b-ft-docci-448"}
-
-
-def create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p):
-    def chat(message, history):
-        text = message.get("text", "")
-        files = message.get("files", [])
-        processed_files = [handle_file(f) for f in files]
-
-        response = client.predict(
-            message={"text": text, "files": processed_files},
-            system_prompt=system_prompt,
-            temperature=temperature,
-            max_new_tokens=max_tokens,
-            top_k=top_k,
-            repetition_penalty=rep_penalty,
-            top_p=top_p,
-            api_name="/chat",
-        )
-        return response
-
-    return chat
-
-
-def set_client_for_session(model_name, request: gr.Request):
-    headers = {}
-    if request and hasattr(request, "headers"):
-        x_ip_token = request.headers.get("x-ip-token")
-        if x_ip_token:
-            headers["X-IP-Token"] = x_ip_token
-
-    return Client(MODELS[model_name], headers=headers)
-
-
-def safe_chat_fn(message, history, client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p):
-    if client is None:
-        return "Error: Client not initialized. Please refresh the page."
-    try:
-        return create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p)(
-            message, history
-        )
-    except Exception as e:
-        print(f"Error during chat: {e!s}")
-        return f"Error during chat: {e!s}"
-
-
-with gr.Blocks() as demo:
-    client = gr.State()
-
-    with gr.Accordion("Advanced Settings", open=False):
-        system_prompt = gr.Textbox(value="You are a helpful AI assistant.", label="System Prompt")
-        with gr.Row():
-            temperature = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, label="Temperature")
-            top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, label="Top P")
-        with gr.Row():
-            top_k = gr.Slider(minimum=1, maximum=100, value=40, step=1, label="Top K")
-            rep_penalty = gr.Slider(minimum=1.0, maximum=2.0, value=1.1, label="Repetition Penalty")
-        max_tokens = gr.Slider(minimum=64, maximum=4096, value=1024, step=64, label="Max Tokens")
-
-    chat_interface = gr.ChatInterface(
-        fn=safe_chat_fn,
-        additional_inputs=[client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p],
-        multimodal=True,
-    )
-
-    # Initialize client on page load with default model
-    demo.load(fn=set_client_for_session, inputs=[gr.State("Paligemma-10B")], outputs=[client])  # Using default model
-
-# Move the API access check here, after demo is defined
-if hasattr(demo, "fns"):
-    for fn in demo.fns.values():
-        fn.api_name = False
-
-if __name__ == "__main__":
-    demo.launch()
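The deleted PaliGemma app was the one place this repo talked to a Space through gradio_client rather than gr.load. For reference, the core call it wrapped, reduced to a standalone script; the image path is a hypothetical placeholder, and the parameter values mirror the deleted defaults:

from gradio_client import Client, handle_file

# Connect to the PaliGemma Space and call its /chat endpoint directly.
client = Client("akhaliq/paligemma2-10b-ft-docci-448")
result = client.predict(
    message={"text": "Describe this image.", "files": [handle_file("photo.jpg")]},  # hypothetical local file
    system_prompt="You are a helpful AI assistant.",
    temperature=0.7,
    max_new_tokens=1024,
    top_k=40,
    repetition_penalty=1.1,
    top_p=0.95,
    api_name="/chat",
)
print(result)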
app_playai.py
CHANGED
@@ -1,10 +1,7 @@
 import gradio as gr
 import playai_gradio
 
-demo =
-name=
+demo =gr.load(
+    name='PlayDialog',
     src=playai_gradio.registry,
 )
-
-for fn in demo.fns.values():
-    fn.api_name = False
app_qwen.py
CHANGED
@@ -16,9 +16,8 @@ demo = get_app(
         "qwen1.5-14b-chat",
         "qwen1.5-7b-chat",
         "qwq-32b-preview",
-        "qvq-72b-preview",
     ],
-    default_model="
+    default_model="qwq-32b-preview",
     src=dashscope_gradio.registry,
     accept_token=not os.getenv("DASHSCOPE_API_KEY"),
 )
app_sailor.py
DELETED
@@ -1,9 +0,0 @@
-import gradio as gr
-import spaces
-import transformers_gradio
-
-demo = gr.load(name="sail/Sailor2-20B-Chat", src=transformers_gradio.registry)
-demo.fn = spaces.GPU()(demo.fn)
-
-for fn in demo.fns.values():
-    fn.api_name = False
app_sambanova.py
CHANGED
@@ -13,12 +13,8 @@ demo = get_app(
         "Meta-Llama-3.1-8B-Instruct",
         "Meta-Llama-3.1-70B-Instruct",
         "Meta-Llama-3.1-405B-Instruct",
-        "Qwen2.5-72B-Instruct",
-        "Qwen2.5-Coder-32B-Instruct",
-        "Meta-Llama-3.3-70B-Instruct",
-        "QwQ-32B-Preview",
     ],
-    default_model="
+    default_model="Llama-3.2-90B-Vision-Instruct",
     src=sambanova_gradio.registry,
     accept_token=not os.getenv("SAMBANOVA_API_KEY"),
     multimodal=True,
app_showui.py
DELETED
@@ -1,10 +0,0 @@
-import gradio as gr
-
-# Load the Gradio space
-demo = gr.load(name="showlab/ShowUI", src="spaces")
-
-
-# Disable API access for all functions
-if hasattr(demo, "fns"):
-    for fn in demo.fns.values():
-        fn.api_name = False
app_smolagents.py
DELETED
@@ -1,19 +0,0 @@
-import ai_gradio
-
-from utils_ai_gradio import get_app
-
-SMOLAGENTS_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("smolagents:")]
-
-
-SMOLAGENTS_MODELS_DISPLAY = [k.replace("smolagents:", "") for k in SMOLAGENTS_MODELS_FULL]
-
-demo = get_app(
-    models=SMOLAGENTS_MODELS_FULL,  # Use the full names with prefix
-    default_model=SMOLAGENTS_MODELS_FULL[-1],
-    dropdown_label="Select SmolAgents Model",
-    choices=SMOLAGENTS_MODELS_DISPLAY,  # Display names without prefix
-    fill_height=True,
-)
-
-if __name__ == "__main__":
-    demo.launch()
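All of the deleted ai_gradio front-ends (app_langchain.py, app_minimax.py, app_minimax_coder.py, app_openai_coder.py, and this one) used the same registry-filtering idiom: keep the provider-prefixed keys for loading and strip the prefix for display. A compact sketch; str.removeprefix (Python 3.9+) is the safer spelling, since the str.replace used above would also rewrite any interior occurrence of the prefix:

import ai_gradio

PREFIX = "smolagents:"

# Fully qualified keys, e.g. "smolagents:...", are what the registry loads by.
full_names = [k for k in ai_gradio.registry.keys() if k.startswith(PREFIX)]

# Display names drop the prefix; removeprefix only touches the string's start.
display_names = [k.removeprefix(PREFIX) for k in full_names]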
app_together.py
CHANGED
@@ -40,9 +40,8 @@ demo = get_app(
         "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
         "togethercomputer/StripedHyena-Nous-7B",
         "upstage/SOLAR-10.7B-Instruct-v1.0",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
     ],
-    default_model="meta-llama/Llama-3.
+    default_model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
     src=together_gradio.registry,
     accept_token=not os.getenv("TOGETHER_API_KEY"),
     multimodal=True,
app_transformers.py
DELETED
@@ -1,11 +0,0 @@
-import gradio as gr
-
-demo = gr.load(name="akhaliq/phi-4", src="spaces")
-
-# Disable API access for all functions
-if hasattr(demo, "fns"):
-    for fn in demo.fns.values():
-        fn.api_name = False
-
-if __name__ == "__main__":
-    demo.launch()
app_trellis.py
DELETED
@@ -1,10 +0,0 @@
-import gradio as gr
-
-# Load the Gradio space
-demo = gr.load(name="JeffreyXiang/TRELLIS", src="spaces")
-
-
-# Disable API access for all functions
-if hasattr(demo, "fns"):
-    for fn in demo.fns.values():
-        fn.api_name = False
app_xai.py
CHANGED
@@ -8,10 +8,8 @@ demo = get_app(
     models=[
         "grok-beta",
         "grok-vision-beta",
-        "grok-2-vision-1212",
-        "grok-2-1212",
     ],
-    default_model="grok-
+    default_model="grok-vision-beta",
     src=xai_gradio.registry,
     accept_token=not os.getenv("XAI_API_KEY"),
 )
pre-requirements.txt
ADDED
@@ -0,0 +1 @@
+git+https://github.com/AK391/transformers-gradio.git
pyproject.toml
CHANGED
@@ -10,35 +10,20 @@ dependencies = [
     "dashscope-gradio",
     "fal-gradio",
     "fireworks-gradio",
-    "gemini-gradio>=0.0.
-    "gradio
-    "gradio>=5.12.0",
+    "gemini-gradio>=0.0.1",
+    "gradio>=5.7.0",
     "groq-gradio>=0.0.2",
-    "hyperbolic-gradio>=0.0.
-    "mistral-gradio",
+    "hyperbolic-gradio>=0.0.4",
+    "mistral-gradio>=0.0.2",
     "nvidia-gradio",
-    "openai-gradio>=0.0.
+    "openai-gradio>=0.0.4",
     "perplexity-gradio>=0.0.1",
-    "python-dotenv>=1.0.1",
     "replicate-gradio",
     "sambanova-gradio>=0.1.9",
+    "spaces>=0.30.4",
     "together-gradio>=0.0.1",
-    "
+    "torch==2.4.0",
     "xai-gradio>=0.0.2",
-    "numba>=0.60.0",
-    "llvmlite>=0.43.0",
-    "librosa>=0.10.2.post1",
-    "twilio>=9.4.2",
-    "playai-gradio",
-    "lumaai-gradio",
-    "cohere-gradio",
-    "crewai>=0.95.0",
-    "instructor>=1.7.2",
-    "crewai-tools>=0.25.8",
-    "langchain>=0.3.14",
-    "chromadb>=0.5.23",
-    "openai>=1.55.0",
-    "ai-gradio[crewai,deepseek,gemini,groq,hyperbolic,openai,smolagents,transformers, langchain, mistral,minimax,nvidia]>=0.2.25",
 ]
 
 [tool.uv.sources]
@@ -49,29 +34,62 @@ nvidia-gradio = { git = "https://github.com/AK391/nvidia-gradio.git" }
 dashscope-gradio = { git = "https://github.com/AK391/dashscope-gradio.git" }
 fal-gradio = { git = "https://github.com/AK391/fal-gradio.git" }
 replicate-gradio = { git = "https://github.com/AK391/replicate-gradio.git" }
-mistral-gradio = { git = "https://github.com/AK391/mistral-gradio.git" }
-playai-gradio = { git = "https://github.com/AK391/playai-gradio.git" }
-lumaai-gradio = { git = "https://github.com/AK391/lumaai-gradio.git" }
-cohere-gradio = { git = "https://github.com/AK391/cohere-gradio.git" }
 
 [tool.ruff]
+# Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
+select = ["E", "F"]
+ignore = ["E501"]  # line too long (black is taking care of this)
 line-length = 119
-
-
-
+fixable = [
+    "A",
+    "B",
+    "C",
+    "D",
     "E",
     "F",
-
+    "G",
+    "I",
     "N",
+    "Q",
     "S",
     "T",
-    "UP",
     "W",
+    "ANN",
+    "ARG",
+    "BLE",
+    "COM",
+    "DJ",
+    "DTZ",
+    "EM",
+    "ERA",
+    "EXE",
+    "FBT",
+    "ICN",
+    "INP",
+    "ISC",
+    "NPY",
+    "PD",
+    "PGH",
+    "PIE",
+    "PL",
+    "PT",
+    "PTH",
+    "PYI",
+    "RET",
+    "RSE",
+    "RUF",
+    "SIM",
+    "SLF",
+    "TCH",
+    "TID",
+    "TRY",
+    "UP",
+    "YTT",
 ]
-ignore = ["T201"]
-unfixable = [
-    "F401",  # unused-import
-]
 
-[tool.
-
+[tool.isort]
+profile = "black"
+line_length = 119
+
+[tool.black]
+line-length = 119