Commit cb14cec
Duplicate from JunchuanYu/Sydne-AI
Co-authored-by: JunchuanYu <JunchuanYu@users.noreply.huggingface.co>

Files changed:
- .gitattributes +34 -0
- README.md +13 -0
- app.py +123 -0
- requirements.txt +1 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Chatgpt Demo
+emoji: 🐠
+colorFrom: red
+colorTo: yellow
+sdk: gradio
+sdk_version: 3.19.1
+app_file: app.py
+pinned: false
+duplicated_from: JunchuanYu/Sydne-AI
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,123 @@
+import gradio as gr
+import os
+import openai
+import requests
+import json
+
+openai.api_key = os.environ.get("OPENAI_API_KEY")
+
+prompt_templates = {"Default ChatGPT": ""}
+
+def get_empty_state():
+    return {"total_tokens": 0, "messages": []}
+
+def download_prompt_templates():
+    url = "https://raw.githubusercontent.com/f/awesome-chatgpt-prompts/main/prompts.csv"
+    response = requests.get(url)
+
+    for line in response.text.splitlines()[1:]:
+        act, prompt = line.split('","')
+        prompt_templates[act.replace('"', '')] = prompt.replace('"', '')
+
+    choices = list(prompt_templates.keys())
+    return gr.update(value=choices[0], choices=choices)
+
+def on_token_change(user_token):
+    openai.api_key = user_token or os.environ.get("OPENAI_API_KEY")
+
+def on_prompt_template_change(prompt_template):
+    if not isinstance(prompt_template, str): return
+    return prompt_templates[prompt_template]
+
+def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, state):
+
+    history = state['messages']
+
+    if not prompt:
+        return gr.update(value='', visible=state['total_tokens'] < 1_000), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: {state['total_tokens']} / 3000", state
+
+    prompt_template = prompt_templates[prompt_template]
+
+    system_prompt = []
+    if prompt_template:
+        system_prompt = [{ "role": "system", "content": prompt_template }]
+
+    prompt_msg = { "role": "user", "content": prompt }
+
+    try:
+        completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
+
+        history.append(prompt_msg)
+        history.append(completion.choices[0].message.to_dict())
+
+        state['total_tokens'] += completion['usage']['total_tokens']
+
+    except Exception as e:
+        history.append(prompt_msg)
+        history.append({
+            "role": "system",
+            "content": f"Error: {e}"
+        })
+
+    total_tokens_used_msg = f"Total tokens used: {state['total_tokens']} / 3000" if not user_token else ""
+    chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
+    input_visibility = user_token or state['total_tokens'] < 3000
+
+    return gr.update(value='', visible=input_visibility), chat_messages, total_tokens_used_msg, state
+
+def clear_conversation():
+    return gr.update(value=None, visible=True), None, "", get_empty_state()
+
+css = """
+#col-container {max-width: 80%; margin-left: auto; margin-right: auto;}
+#chatbox {min-height: 400px;}
+#header {text-align: center;}
+#prompt_template_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px;}
+#total_tokens_str {text-align: right; font-size: 0.8em; color: #666; height: 1em;}
+#label {font-size: 0.8em; padding: 0.5em; margin: 0;}
+.message { font-size: 1.2em; }
+"""
+
+with gr.Blocks(css=css) as demo:
+
+    state = gr.State(get_empty_state())
+
+
+    with gr.Column(elem_id="col-container"):
+        gr.Markdown("""## OpenAI ChatGPT Demo
+                    Using the official API (gpt-3.5-turbo model)<br>
+                    Prompt templates from [awesome-chatgpt-prompts](https://github.com/f/awesome-chatgpt-prompts).<br>
+                    Current limit is 3000 tokens per conversation.""",
+                    elem_id="header")
+
+        with gr.Row():
+            with gr.Column():
+                chatbot = gr.Chatbot(elem_id="chatbox")
+                input_message = gr.Textbox(show_label=False, placeholder="Enter text and press enter", visible=True).style(container=False)
+                btn_submit = gr.Button("Submit")
+                total_tokens_str = gr.Markdown(elem_id="total_tokens_str")
+                btn_clear_conversation = gr.Button("🔃 Start New Conversation")
+            with gr.Column():
+                prompt_template = gr.Dropdown(label="Set a custom instruction for the chatbot:", choices=list(prompt_templates.keys()))
+                prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview")
+                gr.Markdown("Enter your own OpenAI API Key to remove the 3000 token limit. You can get it [here](https://platform.openai.com/account/api-keys).", elem_id="label")
+                user_token = gr.Textbox(placeholder="OpenAI API Key", type="password", show_label=False)
+                with gr.Accordion("Advanced parameters", open=False):
+                    temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, interactive=True, label="Temperature (higher = more creative/chaotic)")
+                    max_tokens = gr.Slider(minimum=100, maximum=4096, value=1000, step=1, interactive=True, label="Max tokens per response")
+
+        gr.HTML('''<br><br><br><center><a href="https://huggingface.co/spaces/anzorq/chatgpt-demo?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>You can duplicate this Space.<br>
+                Don't forget to set your own <a href="https://platform.openai.com/account/api-keys">OpenAI API Key</a> environment variable in Settings.<br>
+                <p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.chatgpt_api_demo_hf" alt="visitors"></p></center>''')
+
+    btn_submit.click(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, state], [input_message, chatbot, total_tokens_str, state])
+    input_message.submit(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, state], [input_message, chatbot, total_tokens_str, state])
+    btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
+    prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
+    user_token.change(on_token_change, inputs=[user_token], outputs=[])
+
+
+demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template])
+
+
+demo.launch(debug=True, height='800px')
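For reference, submit_message() in app.py talks to OpenAI through the legacy pre-1.0 Python client (openai.ChatCompletion.create with the gpt-3.5-turbo model). A minimal sketch of that call pattern outside Gradio, assuming OPENAI_API_KEY is set in the environment and using illustrative variable names, looks like this:

import os
import openai

# Legacy 0.27.x client, matching the pin in requirements.txt below.
openai.api_key = os.environ.get("OPENAI_API_KEY")

# Same message shape as submit_message(): optional system prompt,
# prior turns, then the new user message.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

completion = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=messages,
    temperature=0.7,
    max_tokens=1000,
)

print(completion.choices[0].message["content"])
# Token accounting the app adds to state["total_tokens"] (3000-token budget).
print(completion["usage"]["total_tokens"])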
requirements.txt
ADDED
@@ -0,0 +1 @@
+openai==0.27.0
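The single pinned dependency is deliberate: app.py relies on the module-level pre-1.0 client surface (openai.api_key, openai.ChatCompletion.create), which later 1.x releases of the openai package removed, while gradio itself is provided by the Space runtime (sdk: gradio, sdk_version: 3.19.1 in README.md). A small sanity check, as a sketch (the assertion and message are illustrative, not part of the Space):

from importlib.metadata import version

# The app expects the legacy 0.x client; a 1.x install would break
# openai.ChatCompletion.create at runtime.
assert version("openai").startswith("0."), "expected the pre-1.0 openai client (e.g. 0.27.0)"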