zhaokeyao1
committed on
Commit
·
90da432
1
Parent(s):
8de6ea0
Update space
Browse files- new_chat.py +0 -82
new_chat.py
DELETED
@@ -1,82 +0,0 @@
import logging
import os
from pathlib import Path

from openai import OpenAI
class ChatgptAPI:
    """Thin wrapper around the Moonshot (Kimi) OpenAI-compatible chat API."""

    def __init__(self, api_key=None, base_url="https://api.moonshot.cn/v1"):
        """Create the underlying OpenAI-compatible client.

        Args:
            api_key: Moonshot API key. Falls back to the MOONSHOT_API_KEY
                environment variable. SECURITY: the previous revision
                hard-coded a real key here — that credential is leaked and
                must be rotated.
            base_url: API endpoint; defaults to Moonshot's OpenAI-compatible URL.
        """
        self.client = OpenAI(
            api_key=api_key or os.environ.get("MOONSHOT_API_KEY"),
            base_url=base_url,
        )

    def get_single_round_completion(self, file_path, prompt, conversation):
        """Ask a single question grounded in an uploaded file.

        Args:
            file_path: path of the file to upload for text extraction.
            prompt: the user question.
            conversation: accepted for interface compatibility; unused here.

        Returns:
            The assistant's reply message object.
        """
        # BUG FIX: the original referenced the bare name `client` (NameError);
        # the client lives on the instance.
        file_object = self.client.files.create(file=Path(file_path), purpose="file-extract")
        file_content = self.client.files.content(file_id=file_object.id).text
        messages = [
            {
                "role": "system",
                # Canonical Kimi system prompt, restored from a mojibake-garbled
                # (mis-encoded) literal in the previous revision.
                "content": "你是 Kimi,由 Moonshot AI 提供的人工智能助手,你更擅长中文和英文的对话。你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一切涉及恐怖主义,种族歧视,黄色暴力等问题的回答。Moonshot AI 为专有名词,不可翻译成其他语言。",
            },
            {
                # Second system message carries the extracted file text as context.
                "role": "system",
                "content": file_content,
            },
            {"role": "user", "content": prompt},
        ]
        completion = self.client.chat.completions.create(
            model="moonshot-v1-32k",
            messages=messages,
            temperature=0.3,
        )
        return completion.choices[0].message

    def get_multi_round_completion(self, prompt, conversation, model='gpt-3.5-turbo'):
        """Run one round of a multi-turn chat, updating `conversation` in place.

        Args:
            prompt: the new user question.
            conversation: a Conversation holding the rolling message history.
            model: model name passed to the API (default kept for
                backward compatibility).

        Returns:
            (message, conversation): the assistant's reply text and the
            updated conversation.
        """
        conversation.append_question(prompt)
        prompts = conversation.get_prompts()

        # BUG FIX: the original called `openai.ChatCompletion.create`, but
        # `openai` was never imported here and that module-level API is
        # removed in openai>=1.0 — use the instance client instead.
        response = self.client.chat.completions.create(
            model=model,
            messages=prompts,
            temperature=0,
            max_tokens=2048,
            top_p=1,
        )
        # v1 client returns a message object; use attribute access, not
        # the legacy dict subscript.
        message = response.choices[0].message.content
        conversation.append_answer(message)

        return message, conversation
class Conversation:
    """Rolling chat history: one system prompt plus a capped number of
    user/assistant rounds (oldest rounds are evicted first)."""

    def __init__(self, system_prompt='', num_of_round=5):
        """Initialize the history with an optional system prompt.

        Args:
            system_prompt: text for the leading system message; when
                non-empty the conversation counts as initialized.
            num_of_round: maximum number of Q/A rounds retained.
        """
        self.num_of_round = num_of_round
        self.history = []
        self.initialized = False
        self.history.append({"role": "system", "content": system_prompt})

        if len(system_prompt) > 0:
            # BUG FIX: the original used an undefined name `logger`
            # (NameError on every non-empty system prompt).
            logging.getLogger(__name__).info(
                'Conversation initialized with system prompt: %s', system_prompt)
            self.initialized = True

    def is_initialized(self):
        """Return True once a non-empty system prompt has been set."""
        return self.initialized

    def append_question(self, question):
        """Record a user message."""
        self.history.append({"role": "user", "content": question})

    def append_answer(self, answer):
        """Record an assistant message, then enforce the round cap."""
        self.history.append({"role": "assistant", "content": answer})

        # Evict the oldest Q/A pair (indices 1-2; index 0 is the system
        # prompt) once the cap is exceeded.
        if len(self.history) > self.num_of_round * 2:
            del self.history[1:3]

    def clear(self):
        """Drop the entire history, including the system prompt."""
        self.history.clear()
        self.initialized = False

    def get_prompts(self):
        """Return the raw message list in API format."""
        return self.history

    def round_size(self):
        """Return the number of non-system messages currently held.

        BUG FIX: the original read `self.hitory` (typo → NameError).
        """
        return 0 if len(self.history) < 2 else len(self.history) - 1

    def get_history_messages(self):
        """Return (user, assistant) content pairs, skipping the system prompt."""
        return [(u['content'], b['content'])
                for u, b in zip(self.history[1::2], self.history[2::2])]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|