Synced repo using 'sync_with_huggingface' Github Action
- client/html/index.html +16 -10
- g4f/Provider/Providers/Zeabur.py +50 -0
- g4f/Provider/__init__.py +1 -1
- g4f/models.py +28 -21
client/html/index.html
CHANGED
@@ -12,9 +12,20 @@
      content="A conversational AI system that listens, learns, and challenges" />
    <meta property="og:url" content="https://chat.acy.dev" />
    <link rel="stylesheet" href="{{ url_for('bp.static', filename='css/style.css') }}" />
-   <link
-
-
+   <link
+     rel="apple-touch-icon"
+     sizes="180x180"
+     href="{{ url_for('bp.static', filename='img/apple-touch-icon.png') }}" />
+   <link
+     rel="icon"
+     type="image/png"
+     sizes="32x32"
+     href="{{ url_for('bp.static', filename='img/favicon-32x32.png') }}" />
+   <link
+     rel="icon"
+     type="image/png"
+     sizes="16x16"
+     href="{{ url_for('bp.static', filename='img/favicon-16x16.png') }}" />
    <link rel="manifest" href="{{ url_for('bp.static', filename='img/site.webmanifest') }}" />
    <link
      rel="stylesheet"
@@ -44,9 +55,7 @@
      </div>
      <a class="info" href="https://github.com/ramonvc/gptfree-jailbreak-webui" target="_blank">
        <i class="fa-brands fa-github"></i>
-       <span class="conversation-title">
-         Version: 0.0.10-Alpha
-       </span>
+       <span class="conversation-title"> Version: 0.0.10-Alpha </span>
      </a>
    </div>
  </div>
@@ -76,11 +85,8 @@
    <div class="field">
      <select class="dropdown" name="model" id="model">
        <option value="gpt-3.5-turbo" selected>GPT-3.5</option>
-       <option value="gpt-3.5-turbo-
+       <option value="gpt-3.5-turbo-0301">GPT-3.5-turbo-0301</option>
        <option value="gpt-3.5-turbo-16k">GPT-3.5-turbo-16k</option>
-       <option value="gpt-3.5-turbo-16k-0613">
-         GPT-3.5-turbo-16k-0613
-       </option>
        <option value="gpt-4">GPT-4</option>
      </select>
    </div>
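The new dropdown entry only does anything if its value matches a key in ModelUtils.convert in g4f/models.py (updated further down in this same commit). A minimal, purely illustrative check of that alignment, assuming the g4f package imports cleanly:

# Hypothetical sanity check, not part of the commit: the <option> value added
# above must be a key in g4f/models.py's ModelUtils.convert lookup table.
from g4f import models

assert 'gpt-3.5-turbo-0301' in models.ModelUtils.convert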
g4f/Provider/Providers/Zeabur.py
ADDED
@@ -0,0 +1,50 @@
+import os
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = "https://gptleg.zeabur.app"
+model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301',
+         'gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-0613']
+supports_stream = True
+needs_auth = False
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    headers = {
+        'Authority': 'chat.dfehub.com',
+        'Content-Type': 'application/json',
+        'Method': 'POST',
+        'Path': '/api/openai/v1/chat/completions',
+        'Scheme': 'https',
+        'Accept': 'text/event-stream',
+        'Accept-Language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6,zh;q=0.5',
+        'Content-Type': 'application/json',
+        'Origin': 'https://gptleg.zeabur.app',
+        'Referer': 'https://gptleg.zeabur.app/',
+        'Sec-Ch-Ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+        'Sec-Ch-Ua-Mobile': '?0',
+        'Sec-Ch-Ua-Platform': '"Windows"',
+        'Sec-Fetch-Dest': 'empty',
+        'Sec-Fetch-Mode': 'cors',
+        'Sec-Fetch-Site': 'same-origin',
+        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+        'X-Requested-With': 'XMLHttpRequest',
+    }
+
+    data = {
+        'model': model,
+        'temperature': 0.7,
+        'max_tokens': '16000',
+        'presence_penalty': 0,
+        'messages': messages,
+    }
+
+    response = requests.post(url + '/api/openai/v1/chat/completions',
+                             headers=headers, json=data, stream=stream)
+
+    yield response.json()['choices'][0]['message']['content']
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join(
+        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
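A minimal sketch of exercising the new provider directly, assuming the repo layout above and that the upstream endpoint at gptleg.zeabur.app is reachable. Note that _create_completion is a generator, but it reads the response with .json() and yields the full assistant message once, even when stream=True:

# Illustrative only; mirrors the signature committed above.
from g4f.Provider.Providers import Zeabur

messages = [{'role': 'user', 'content': 'Say hello in one sentence.'}]

# The generator yields a single chunk: the complete assistant message.
for chunk in Zeabur._create_completion(model='gpt-3.5-turbo-0301',
                                       messages=messages, stream=False):
    print(chunk)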
g4f/Provider/__init__.py
CHANGED
@@ -9,7 +9,6 @@ from .Providers import (
    ChatgptLogin,
    ChatgptLogin,
    DeepAi,
-   Dfehub,
    Easychat,
    Ezcht,
    Fakeopen,
@@ -28,6 +27,7 @@ from .Providers import (
    Xiaor,
    Yqcloud,
    You,
+   Zeabur
)

Palm = Bard
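With the import swapped in (Dfehub out, Zeabur in), the new module is re-exported as Provider.Zeabur. A quick illustrative check, assuming the package imports cleanly:

# Hypothetical check that the re-export works as committed above.
from g4f import Provider

print(Provider.Zeabur.url)              # https://gptleg.zeabur.app
print(Provider.Zeabur.supports_stream)  # True
print(Provider.Zeabur.model)            # list of supported model names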
g4f/models.py
CHANGED
@@ -1,6 +1,7 @@
from g4f import Provider
import random

+
class Model:
    class model:
        name: str
@@ -15,17 +16,22 @@
    class gpt_35_turbo_0613:
        name: str = 'gpt-3.5-turbo-0613'
        base_provider: str = 'openai'
-       best_provider: Provider.Provider = Provider.
+       best_provider: Provider.Provider = Provider.Zeabur
+
+   class gpt_35_turbo_0301:
+       name: str = 'gpt-3.5-turbo-0301'
+       base_provider: str = 'openai'
+       best_provider: Provider.Provider = Provider.Zeabur

    class gpt_35_turbo_16k_0613:
        name: str = 'gpt-3.5-turbo-16k-0613'
        base_provider: str = 'openai'
-       best_provider: Provider.Provider = Provider.
+       best_provider: Provider.Provider = Provider.Zeabur

    class gpt_35_turbo_16k:
        name: str = 'gpt-3.5-turbo-16k'
        base_provider: str = 'openai'
-       best_provider: Provider.Provider = Provider.
+       best_provider: Provider.Provider = Provider.Zeabur

    class gpt_4_dev:
        name: str = 'gpt-4-for-dev'
@@ -36,7 +42,7 @@
        name: str = 'gpt-4'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.ChatgptAi
-
+
    class gpt_4_0613:
        name: str = 'gpt-4-0613'
        base_provider: str = 'openai'
@@ -152,79 +158,80 @@
        name: str = 'text-davinci-003'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Vercel
-
+
    class palm:
        name: str = 'palm2'
        base_provider: str = 'google'
        best_provider: Provider.Provider = Provider.Bard
-
-
+
    """ 'falcon-40b': Model.falcon_40b,
    'falcon-7b': Model.falcon_7b,
    'llama-13b': Model.llama_13b,"""
-
+
    class falcon_40b:
        name: str = 'falcon-40b'
        base_provider: str = 'huggingface'
        best_provider: Provider.Provider = Provider.H2o
-
+
    class falcon_7b:
        name: str = 'falcon-7b'
        base_provider: str = 'huggingface'
        best_provider: Provider.Provider = Provider.H2o
-
+
    class llama_13b:
        name: str = 'llama-13b'
        base_provider: str = 'huggingface'
        best_provider: Provider.Provider = Provider.H2o
-
+
+
class ModelUtils:
    convert: dict = {
        'gpt-3.5-turbo': Model.gpt_35_turbo,
        'gpt-3.5-turbo-0613': Model.gpt_35_turbo_0613,
+       'gpt-3.5-turbo-0301': Model.gpt_35_turbo_0301,
        'gpt-4': Model.gpt_4,
        'gpt-4-0613': Model.gpt_4_0613,
        'gpt-4-for-dev': Model.gpt_4_dev,
        'gpt-3.5-turbo-16k': Model.gpt_35_turbo_16k,
        'gpt-3.5-turbo-16k-0613': Model.gpt_35_turbo_16k_0613,
-
+
        'claude-instant-v1-100k': Model.claude_instant_v1_100k,
        'claude-v1-100k': Model.claude_v1_100k,
        'claude-instant-v1': Model.claude_instant_v1,
        'claude-v1': Model.claude_v1,
-
+
        'alpaca-7b': Model.alpaca_7b,
        'stablelm-tuned-alpha-7b': Model.stablelm_tuned_alpha_7b,
-
+
        'bloom': Model.bloom,
        'bloomz': Model.bloomz,
-
+
        'flan-t5-xxl': Model.flan_t5_xxl,
        'flan-ul2': Model.flan_ul2,
-
+
        'gpt-neox-20b': Model.gpt_neox_20b,
        'oasst-sft-4-pythia-12b-epoch-3.5': Model.oasst_sft_4_pythia_12b_epoch_35,
        'santacoder': Model.santacoder,
-
+
        'command-medium-nightly': Model.command_medium_nightly,
        'command-xlarge-nightly': Model.command_xlarge_nightly,
-
+
        'code-cushman-001': Model.code_cushman_001,
        'code-davinci-002': Model.code_davinci_002,
-
+
        'text-ada-001': Model.text_ada_001,
        'text-babbage-001': Model.text_babbage_001,
        'text-curie-001': Model.text_curie_001,
        'text-davinci-002': Model.text_davinci_002,
        'text-davinci-003': Model.text_davinci_003,
-
+
        'palm2': Model.palm,
        'palm': Model.palm,
        'google': Model.palm,
        'google-bard': Model.palm,
        'google-palm': Model.palm,
        'bard': Model.palm,
-
+
        'falcon-40b': Model.falcon_40b,
        'falcon-7b': Model.falcon_7b,
        'llama-13b': Model.llama_13b,
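A short sketch of how the new entries are meant to be consumed: a model-name string is resolved through ModelUtils.convert to the corresponding Model class, whose best_provider points at the provider module added in this commit. Illustrative only, assuming the g4f package imports cleanly:

# Resolve the model name added in this commit and inspect its metadata.
from g4f import models

model_cls = models.ModelUtils.convert['gpt-3.5-turbo-0301']
print(model_cls.name)           # 'gpt-3.5-turbo-0301'
print(model_cls.base_provider)  # 'openai'
print(model_cls.best_provider)  # the Provider.Zeabur module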