zhaokeyao1 committed
Commit · 3a13b83 (1 parent: 758fbae)
Update button

Files changed:
- app.py (+5 -5)
- new_chat.py (+82 -0)
app.py
CHANGED

@@ -38,7 +38,7 @@ class ChatgptAPI:
         )
         message=completion.choices[0].message.content
         conversation.append_answer(message)
-        return conversation
+        return message, conversation
 
 
     def get_multi_round_completion(self, prompt, conversation, model='gpt-3.5-turbo'):
@@ -58,7 +58,7 @@ class ChatgptAPI:
         return message, conversation
 
 class Conversation:
-    def __init__(self, system_prompt='', num_of_round = 5):
+    def __init__(self, system_prompt='iii', num_of_round = 5):
         self.num_of_round = num_of_round
         self.history = []
         self.initialized = False
@@ -103,7 +103,7 @@ def predict(system_input, password_input, user_in_file, user_input, conversation
 
     if conversation.is_initialized() == False:
         conversation = Conversation(system_input, 5)
-    conversation = chat_api.get_single_round_completion(user_in_file, user_input, conversation)
+    _, conversation = chat_api.get_single_round_completion(user_in_file, user_input, conversation)
     return conversation.get_history_messages(), conversation, None
     #_, conversation = chat_api.get_multi_round_completion(user_input, conversation)
     #return conversation.get_history_messages(), conversation, None
@@ -127,8 +127,8 @@ with gr.Blocks(css="#chatbot{height:350px} .overflow-y-auto{height:600px}") as d
         user_in_txt = gr.Textbox(lines=3, label="User role content:", placeholder="Enter text...", container=False)
 
     with gr.Row():
-        submit_button = gr.Button("
-        reset_button = gr.Button("
+        submit_button = gr.Button("Reset")
+        reset_button = gr.Button("Submit")
 
     submit_button.click(predict, [system_in_txt, password_in_txt, user_in_file, user_in_txt, conversation], [chatbot, conversation, user_in_txt])
    reset_button.click(clear_history, [conversation], [chatbot, conversation], queue=False)
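The click wiring in the last hunk follows the usual Gradio Blocks pattern: the Conversation object lives in a gr.State and is threaded through every handler. Note that the new labels appear swapped relative to the handlers (the button bound to predict is labelled "Reset" and the one bound to clear_history is labelled "Submit"). The following is only a minimal sketch of that wiring with the conventional label mapping; the component names mirror the diff, but the handlers are stand-in stubs rather than the committed predict/clear_history.

import gradio as gr

# Stand-in handlers with the same signatures used in the diff's click() calls.
def predict(system_input, password_input, user_in_file, user_input, conversation):
    # Record the exchange in the state (stubbed answer; the real app calls the chat API).
    conversation = conversation + [(user_input, "stub answer")]
    return conversation, conversation, ""   # chatbot messages, updated state, cleared textbox

def clear_history(conversation):
    return [], []                            # empty chatbot, empty state

with gr.Blocks(css="#chatbot{height:350px} .overflow-y-auto{height:600px}") as demo:
    conversation = gr.State([])
    chatbot = gr.Chatbot(elem_id="chatbot")
    system_in_txt = gr.Textbox(label="System role content:")
    password_in_txt = gr.Textbox(label="Password:", type="password")
    user_in_file = gr.File(label="Upload a file")
    user_in_txt = gr.Textbox(lines=3, label="User role content:", placeholder="Enter text...", container=False)

    with gr.Row():
        submit_button = gr.Button("Submit")
        reset_button = gr.Button("Reset")

    submit_button.click(predict,
                        [system_in_txt, password_in_txt, user_in_file, user_in_txt, conversation],
                        [chatbot, conversation, user_in_txt])
    reset_button.click(clear_history, [conversation], [chatbot, conversation], queue=False)

demo.launch()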
new_chat.py
ADDED
@@ -0,0 +1,82 @@
+from pathlib import Path
+from openai import OpenAI
+
+class ChatgptAPI:
+    def __init__(self, ):
+        self.client = OpenAI(
+            api_key = "sk-u8YI0ArRHFRRdMEdboouRAXVc3PpR6EhZOfxO4tST5Ua9147",
+            base_url = "https://api.moonshot.cn/v1",
+        )
+    def get_single_round_completion(self, file_path, prompt, conversation):
+        file_object = self.client.files.create(file=Path(file_path), purpose="file-extract")
+        file_content = self.client.files.content(file_id=file_object.id).text
+        messages = [
+            {
+                "role": "system",
+                "content": "You are Kimi, an AI assistant provided by Moonshot AI. You are better at conversations in Chinese and English. You provide users with safe, helpful and accurate answers, and you refuse to answer anything involving terrorism, racism, pornography or violence. 'Moonshot AI' is a proper noun and must not be translated into other languages.",
+            },
+            {
+                "role": "system",
+                "content": file_content,
+            },
+            {"role": "user", "content": prompt},
+        ]
+        completion = self.client.chat.completions.create(
+            model="moonshot-v1-32k",
+            messages=messages,
+            temperature=0.3,
+        )
+        return completion.choices[0].message
+
+
+    def get_multi_round_completion(self, prompt, conversation, model='gpt-3.5-turbo'):
+        conversation.append_question(prompt)
+        prompts = conversation.get_prompts()
+
+        response = openai.ChatCompletion.create(
+            model=model,
+            messages=prompts,
+            temperature=0,
+            max_tokens=2048,
+            top_p=1,
+        )
+        message = response.choices[0].message['content']
+        conversation.append_answer(message)
+
+        return message, conversation
+
+class Conversation:
+    def __init__(self, system_prompt='', num_of_round = 5):
+        self.num_of_round = num_of_round
+        self.history = []
+        self.initialized = False
+        self.history.append({"role": "system", "content": system_prompt})
+
+        if len(system_prompt) > 0:
+            logger.info(f'Conversation initialized with system prompt: {system_prompt}')
+            self.initialized = True
+
+    def is_initialized(self):
+        return self.initialized
+
+    def append_question(self, question):
+        self.history.append({"role": "user", "content": question})
+
+    def append_answer(self, answer):
+        self.history.append({"role": "assistant", "content": answer})
+
+        if len(self.history) > self.num_of_round * 2:
+            del self.history[1:3]
+
+    def clear(self):
+        self.history.clear()
+        self.initialized = False
+
+    def get_prompts(self):
+        return self.history
+
+    def round_size(self):
+        return 0 if len(self.history) < 2 else len(self.history) - 1
+
+    def get_history_messages(self):
+        return [(u['content'], b['content']) for u, b in zip(self.history[1::2], self.history[2::2])]