zhaokeyao1 committed on
Commit 63b2df0 · 1 Parent(s): 66877e4

Update app

Files changed (1)
  1. app.py +88 -2
app.py CHANGED
@@ -1,10 +1,96 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
+ #from huggingface_hub import InferenceClient

  """
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
  """
- from new_chat import Conversation, ChatgptAPI
+ #from new_chat import Conversation, ChatgptAPI
+
+ import logging
+ from pathlib import Path
+
+ from openai import OpenAI
+
+ # Module-level logger used by Conversation below.
+ logger = logging.getLogger(__name__)
+
+
+ class ChatgptAPI:
+     # Thin wrapper around the OpenAI-compatible Moonshot (Kimi) endpoint.
+     def __init__(self):
+         self.client = OpenAI(
+             api_key="sk-u8YI0ArRHFRRdMEdboouRAXVc3PpR6EhZOfxO4tST5Ua9147",
+             base_url="https://api.moonshot.cn/v1",
+         )
+
+     def get_single_round_completion(self, file_path, prompt, conversation):
+         # Upload the file and pull back its extracted text so it can be
+         # passed to the model as additional system context.
+         file_object = self.client.files.create(file=Path(file_path), purpose="file-extract")
+         file_content = self.client.files.content(file_id=file_object.id).text
+         messages = [
+             {
+                 "role": "system",
+                 # Standard Kimi system prompt (Chinese): identifies the assistant as Kimi
+                 # by Moonshot AI and sets its safety and language constraints.
+                 "content": "你是 Kimi,由 Moonshot AI 提供的人工智能助手,你更擅长中文和英文的对话。你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一切涉及恐怖主义,种族歧视,黄色暴力等问题的回答。Moonshot AI 为专有名词,不可翻译成其他语言。",
+             },
+             {
+                 "role": "system",
+                 "content": file_content,
+             },
+             {"role": "user", "content": prompt},
+         ]
+         completion = self.client.chat.completions.create(
+             model="moonshot-v1-32k",
+             messages=messages,
+             temperature=0.3,
+         )
+         return completion.choices[0].message
+
+     def get_multi_round_completion(self, prompt, conversation, model='moonshot-v1-32k'):
+         conversation.append_question(prompt)
+         prompts = conversation.get_prompts()
+
+         response = self.client.chat.completions.create(
+             model=model,
+             messages=prompts,
+             temperature=0,
+             max_tokens=2048,
+             top_p=1,
+         )
+         message = response.choices[0].message.content
+         conversation.append_answer(message)
+
+         return message, conversation
+
+
+ class Conversation:
+     # Keeps the system prompt plus a rolling window of the last
+     # `num_of_round` question/answer pairs.
+     def __init__(self, system_prompt='', num_of_round=5):
+         self.num_of_round = num_of_round
+         self.history = []
+         self.initialized = False
+         self.history.append({"role": "system", "content": system_prompt})
+
+         if len(system_prompt) > 0:
+             logger.info(f'Conversation initialized with system prompt: {system_prompt}')
+             self.initialized = True
+
+     def is_initialized(self):
+         return self.initialized
+
+     def append_question(self, question):
+         self.history.append({"role": "user", "content": question})
+
+     def append_answer(self, answer):
+         self.history.append({"role": "assistant", "content": answer})
+
+         # Drop the oldest question/answer pair once the window is exceeded,
+         # keeping the system prompt at index 0.
+         if len(self.history) > self.num_of_round * 2:
+             del self.history[1:3]
+
+     def clear(self):
+         self.history.clear()
+         self.initialized = False
+
+     def get_prompts(self):
+         return self.history
+
+     def round_size(self):
+         return 0 if len(self.history) < 2 else len(self.history) - 1
+
+     def get_history_messages(self):
+         # Pair user and assistant turns (skipping the system prompt) for display.
+         return [(u['content'], b['content']) for u, b in zip(self.history[1::2], self.history[2::2])]
+

  chat_api = ChatgptAPI()
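
For reference, here is a minimal sketch of how the rest of app.py might wire the classes added in this commit into a Gradio chat UI. It is an illustration only, not part of the commit: the respond handler, the gr.ChatInterface wiring, and the placeholder system prompt are assumptions.

import gradio as gr

chat_api = ChatgptAPI()  # mirrors the existing line at the end of the diff
# Placeholder system prompt; the real app may use a different one.
conversation = Conversation(system_prompt="You are a helpful assistant.", num_of_round=5)

def respond(message, history):
    # Conversation keeps its own rolling window of rounds, so the Gradio-supplied
    # history argument is not re-sent to the API here.
    answer, _ = chat_api.get_multi_round_completion(message, conversation)
    return answer

demo = gr.ChatInterface(respond)

if __name__ == "__main__":
    demo.launch()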