yizhangliu committed · Commit f633886 · 1 parent: f21f8e3
Update app.py

app.py CHANGED

@@ -5,8 +5,18 @@ from loguru import logger
 import paddlehub as hub
 import random
 
+
 language_translation_model = hub.Module(directory=f'./baidu_translate')
 def getTextTrans(text, source='zh', target='en'):
+    def is_chinese(string):
+        for ch in string:
+            if u'\u4e00' <= ch <= u'\u9fff':
+                return True
+        return False
+
+    if not is_chinese(text) and target == 'en':
+        return text
+
     try:
         text_translation = language_translation_model.translate(text, source, target)
         return text_translation

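Note: the new is_chinese() guard makes getTextTrans() a no-op when the prompt already contains no Chinese characters and the target is English, so the Baidu translation module is only called when needed. Below is a minimal standalone sketch of that guard; translate_to_english() is a hypothetical stand-in for the paddlehub language_translation_model.translate() call, and only the character-range check mirrors the diff.

# Minimal sketch of the early-return guard added above (standalone, no paddlehub).
# `translate_to_english` is a hypothetical stand-in for
# language_translation_model.translate(text, 'zh', 'en').

def is_chinese(string):
    # True if any character falls in the CJK Unified Ideographs block U+4E00..U+9FFF.
    return any(u'\u4e00' <= ch <= u'\u9fff' for ch in string)

def translate_to_english(text):
    if not is_chinese(text):
        return text  # nothing to translate, skip the translation call
    return f"<translated: {text}>"  # placeholder for the real translate() call

print(translate_to_english('a cat playing piano'))  # returned unchanged
print(translate_to_english('一只弹钢琴的猫'))          # would go through translation
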
@@ -18,20 +28,21 @@ session_token = os.environ.get('SessionToken')
 
 def get_api():
     api = None
-
-
-
-
-
+    try:
+        api = ChatGPT(session_token)
+        # api.refresh_auth()
+    except Exception as e:
+        print(f'get_api_error:', e)
+        api = None
     return api
-
-def get_response_from_chatbot(api, text):
+
+
+def get_response_from_chatgpt(api, text):
     if api is None:
-        # return "Sorry, I'm busy. Try again later.(1)"
         return "Openai said: I'm too tired. Let me lie down for a few days. If you like, you can visit my home."
     try:
         resp = api.send_message(text)
-        api.refresh_auth()
+        # api.refresh_auth()
         # api.reset_conversation()
         response = resp['message']
         conversation_id = resp['conversation_id']

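Note: get_api() now swallows construction errors, so a missing or expired SessionToken yields api = None instead of crashing the Space, and the renamed get_response_from_chatgpt() returns a canned message in that case. The sketch below shows the same defensive pattern against a hypothetical Client class (a stand-in for the ChatGPT wrapper; only send_message() returning a dict with 'message' and 'conversation_id' is taken from the diff).

# Sketch of the defensive pattern above, using a hypothetical Client class
# instead of the real ChatGPT wrapper.
import os
from loguru import logger

class Client:
    """Illustrative stand-in for the ChatGPT(session_token) wrapper."""
    def __init__(self, session_token):
        if not session_token:
            raise ValueError('missing session token')
        self.session_token = session_token

    def send_message(self, text):
        return {'message': f'echo: {text}', 'conversation_id': 'demo'}

def get_api():
    try:
        return Client(os.environ.get('SessionToken'))
    except Exception as e:
        print('get_api_error:', e)
        return None  # callers must handle a missing client

def get_response(api, text):
    if api is None:
        return 'Service unavailable.'  # mirrors the canned fallback reply
    try:
        resp = api.send_message(text)
        logger.info(f"conversation_id_: [{resp['conversation_id']}]")
        return resp['message']
    except Exception:
        return 'Service unavailable.'
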
@@ -39,46 +50,9 @@ def get_response_from_chatbot(api, text):
         # logger.info(f"response_: {response}")
         logger.info(f"conversation_id_: [{conversation_id}] / parent_id: [{parent_id}]")
     except:
-        # response = "Sorry, I'm busy. Try again later.(2)"
         response = "Openai said: I'm so tired. Let me lie down for a few days. If you like, you can visit my home."
     return response
 
-model_ids = {
-    # "models/stabilityai/stable-diffusion-2-1":"sd-v2-1",
-    # "models/stabilityai/stable-diffusion-2":"sd-v2-0",
-    # "models/runwayml/stable-diffusion-v1-5":"sd-v1-5",
-    # "models/CompVis/stable-diffusion-v1-4":"sd-v1-4",
-    "models/prompthero/openjourney":"openjourney",
-    # "models/ShadoWxShinigamI/Midjourney-Rangoli":"midjourney",
-    # "models/hakurei/waifu-diffusion":"waifu-diffusion",
-    # "models/Linaqruf/anything-v3.0":"anything-v3.0",
-}
-
-tab_actions = []
-tab_titles = []
-for model_id in model_ids.keys():
-    print(model_id, model_ids[model_id])
-    try:
-        tab = gr.Interface.load(model_id)
-        tab_actions.append(tab)
-        tab_titles.append(model_ids[model_id])
-    except:
-        logger.info(f"load_fail__{model_id}_")
-
-def chat(api, input0, input1, chat_radio, chat_history):
-    out_chat = []
-    if chat_history != '':
-        out_chat = json.loads(chat_history)
-    logger.info(f"out_chat_: {len(out_chat)} / {chat_radio}")
-    if chat_radio == "Talk to chatGPT":
-        response = get_response_from_chatbot(api, input0)
-        out_chat.append((input0, response))
-        chat_history = json.dumps(out_chat)
-        return api, out_chat, input1, chat_history
-    else:
-        prompt_en = getTextTrans(input0, source='zh', target='en') + f',{random.randint(0,sys.maxsize)}'
-        return api, out_chat, prompt_en, chat_history
-
 start_work = """async() => {
     function isMobile() {
         try {

@@ -219,26 +193,30 @@ start_work = """async() => {
 text0 = texts[0];
 text1 = texts[1];
 img_index = 0;
-
-
+text_value = text1.value;
+if (window['doCheckPrompt'] === 0 && window['prevPrompt'] !== text_value) {
+    console.log('_____new prompt___[' + text_value + ']_');
     window['doCheckPrompt'] = 1;
-    window['prevPrompt'] =
-
-
-
-
+    window['prevPrompt'] = text_value;
+
+    tabitems = window['gradioEl'].querySelectorAll('.tabitem');
+    for (var i = 0; i < tabitems.length; i++) {
+        inputText = tabitems[i].children[0].children[1].children[0].querySelectorAll('.gr-text-input')[0];
+        setNativeValue(inputText, text_value);
+        inputText.dispatchEvent(new Event('input', { bubbles: true }));
+    }
     setTimeout(function() {
-
-        for (var i = 0; i <
-            if (
-
+        btns = window['gradioEl'].querySelectorAll('button');
+        for (var i = 0; i < btns.length; i++) {
+            if (['Generate image','Run'].includes(btns[i].innerText)) {
+                btns[i].click();
             }
         }
         window['doCheckPrompt'] = 0;
     }, 10);
 }
 tabitems = window['gradioEl'].querySelectorAll('.tabitem');
-imgs = tabitems[img_index].children[0].children[1].children[1].
+imgs = tabitems[img_index].children[0].children[1].children[1].querySelectorAll("img");
 if (imgs.length > 0) {
     if (window['prevImgSrc'] !== imgs[0].src) {
         var user_div = document.createElement("div");

@@ -259,8 +237,8 @@ start_work = """async() => {
         save_conversation(window['chat_bot1'].children[2].children[0]);
     }
 }
-if (tabitems[img_index].children[0].children[1].children[1].children[0].children
-
+if (tabitems[img_index].children[0].children[1].children[1].children[0].children.length > 1) {
+    window['chat_bot1'].children[1].textContent = tabitems[img_index].children[0].children[1].children[1].children[0].textContent;
 } else {
     window['chat_bot1'].children[1].textContent = '';
 }

@@ -275,7 +253,40 @@ start_work = """async() => {
     return false;
 }"""
 
+space_ids = {
+    "spaces/stabilityai/stable-diffusion":"Stable Diffusion 2.1",
+    # "spaces/runwayml/stable-diffusion-v1-5":"Stable Diffusion 1.5",
+    # "spaces/stabilityai/stable-diffusion-1":"Stable Diffusion 1.0",
+}
+
+tab_actions = []
+tab_titles = []
 
+for space_id in space_ids.keys():
+    print(space_id, space_ids[space_id])
+    try:
+        tab = gr.Interface.load(space_id)
+        tab_actions.append(tab)
+        tab_titles.append(space_ids[space_id])
+    except Exception as e:
+        logger.info(f"load_fail__{space_id}_{e}")
+
+def chat(api, input0, input1, chat_radio, chat_history):
+    out_chat = []
+    if chat_history != '':
+        out_chat = json.loads(chat_history)
+    logger.info(f"out_chat_: {len(out_chat)} / {chat_radio}")
+    if chat_radio == "Talk to chatGPT":
+        response = get_response_from_chatgpt(api, input0)
+        # response = get_response_from_microsoft(input0)
+        # response = get_response_from_skywork(input0)
+        out_chat.append((input0, response))
+        chat_history = json.dumps(out_chat)
+        return api, out_chat, input1, chat_history
+    else:
+        prompt_en = getTextTrans(input0, source='zh', target='en') + f',{random.randint(0,sys.maxsize)}'
+        return api, out_chat, prompt_en, chat_history
+
 with gr.Blocks(title='Talk to chatGPT') as demo:
     gr.HTML("<p>You can duplicating this space and use your own session token: <a style='display:inline-block' href='https://huggingface.co/spaces/yizhangliu/chatGPT?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r059973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>")
     gr.HTML("<p> Instruction on how to get session token can be seen in video <a style='display:inline-block' href='https://www.youtube.com/watch?v=TdNSj_qgdFk'><font style='color:blue;weight:bold;'>here</font></a>. Add your session token by going to settings and add under secrets. </p>")

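Note: the image tabs are now built from hosted Spaces ("spaces/...") rather than Inference-API models ("models/..."), and Spaces that fail to load are logged and skipped instead of breaking startup. The sketch below follows the Gradio 3.x gr.Interface.load() usage shown in the diff; the space list is illustrative only.

# Sketch of the load-and-skip loop above (Gradio 3.x style gr.Interface.load).
import gradio as gr
from loguru import logger

space_ids = {
    "spaces/stabilityai/stable-diffusion": "Stable Diffusion 2.1",
}

tab_actions, tab_titles = [], []
for space_id, title in space_ids.items():
    try:
        tab_actions.append(gr.Interface.load(space_id))  # load the hosted Space as an Interface
        tab_titles.append(title)
    except Exception as e:
        logger.info(f"load_fail__{space_id}_{e}")         # skip Spaces that fail to load

The `,{random.randint(0,sys.maxsize)}` suffix appended in chat() appears intended to make each forwarded prompt unique, so the front-end script's prevPrompt comparison treats repeated prompts as new input.
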
@@ -316,3 +327,4 @@ with gr.Blocks(title='Talk to chatGPT') as demo:
     tab_img = gr.TabbedInterface(tab_actions, tab_titles)
 
 demo.launch(debug = True)
+

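Note: the loaded tabs are rendered through gr.TabbedInterface inside the Blocks layout and the app launches with debug logging. A minimal wiring sketch mirroring the diff, assuming tab_actions and tab_titles come from the loop above:

# Minimal wiring sketch; assumes tab_actions / tab_titles built as above.
import gradio as gr

with gr.Blocks(title='Talk to chatGPT') as demo:
    tab_img = gr.TabbedInterface(tab_actions, tab_titles)  # one tab per loaded Space

demo.launch(debug=True)
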