Spaces:
lqfjun
/
Runtime error

lqfjun committed on
Commit
6835b88
1 Parent(s): 1a6889e

Update api1.py

Browse files
Files changed (1) hide show
  1. api1.py +52 -245
api1.py CHANGED
@@ -1,248 +1,55 @@
1
- #!/usr/bin/env python3
2
- import argparse
3
- from flask import Flask, jsonify, request, Response
4
- import urllib.parse
5
- import requests
6
- import time
7
- import json
8
 
 
 
 
 
 
 
 
 
9
 
10
app = Flask(__name__)

# llama.cpp server slot used for prompt caching; -1 lets the server choose one
slot_id = -1

# Command-line configuration for this OAI-compatible proxy in front of server.cpp.
parser = argparse.ArgumentParser(
    description="An example of using server.cpp with a similar API to OAI. It must be used together with server.cpp.")
parser.add_argument(
    "--chat-prompt", type=str,
    default='A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n',
    help="the top prompt in chat completions(default: 'A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')")
parser.add_argument(
    "--user-name", type=str, default="\\GPT4 User:",
    help="USER name in chat completions(default: '\\nUSER: ')")
parser.add_argument(
    "--ai-name", type=str, default="\\nGPT4 Assistant:",
    help="ASSISTANT name in chat completions(default: '\\nASSISTANT: ')")
parser.add_argument(
    "--system-name", type=str, default="",
    help="SYSTEM name in chat completions(default: '\\nASSISTANT's RULE: ')")
parser.add_argument(
    "--stop", type=str, default="<|end_of_turn|>",
    help="the end of response in chat completions(default: '</s>')")
parser.add_argument(
    "--llama-api", type=str, default='http://rpoly1.ddns.net:8818',
    help="Set the address of server.cpp in llama.cpp(default: http://rpoly1.ddns.net:8818)")
parser.add_argument(
    "--api-key", type=str, default="",
    help="Set the api key to allow only few user(default: NULL)")
parser.add_argument(
    "--host", type=str, default='0.0.0.0',
    help="Set the ip address to listen.(default: 127.0.0.1)")
parser.add_argument(
    "--port", type=int, default=7860,
    help="Set the port to listen.(default: 8081)")

args = parser.parse_args()
25
-
26
def is_present(json, key):
    """Return True if *key* exists in the mapping *json* and its value is not None.

    NOTE(review): the parameter name `json` shadows the imported json module
    inside this function; kept for interface compatibility.
    """
    # dict.get collapses the original try/except KeyError plus the
    # non-idiomatic `json[key] == None` comparison into one lookup.
    return json.get(key) is not None
34
-
35
def convert_chat(messages):
    """Flatten an OAI-style message list into one prompt string for llama.cpp.

    Role prefixes and the stop token come from the CLI flags (global `args`);
    literal "\\n" sequences in those flags are expanded to real newlines here.
    """
    prompt = ""
    system_n = args.system_name.replace("\\n", "\n")
    user_n = args.user_name.replace("\\n", "\n")
    ai_n = args.ai_name.replace("\\n", "\n")
    stop = args.stop.replace("\\n", "\n")

    # Guard: an empty conversation previously raised IndexError on messages[-1].
    if not messages:
        return prompt

    # Every message except the last gets its role prefix plus the stop token.
    for line in messages[:-1]:
        if line["role"] == "system":
            prompt += f"{system_n}{line['content']}{stop}"
        if line["role"] == "user":
            prompt += f"{user_n}{line['content']}{stop}"
        if line["role"] == "assistant":
            prompt += f"{ai_n}{line['content']}{stop}"
    if messages[-1]["role"] == "user":
        prompt += f"{user_n}{messages[-1]['content']}{stop}"
        prompt += f"{ai_n}"
    elif messages[-1]["role"] == "assistant":
        # Unterminated assistant turn: the model is expected to continue it.
        prompt += f"{ai_n}{messages[-1]['content']}"

    # NOTE(review): this appends the assistant prefix again even when the
    # user-branch above already added it — confirm the duplicated prefix is
    # actually wanted by the prompt format.
    prompt += ai_n.rstrip()

    return prompt
62
def convert_chat1(messages):
    """Variant of convert_chat: keeps the top chat prompt and formats every
    message uniformly (no special handling of the final turn)."""
    prompt = "" + args.chat_prompt.replace("\\n", "\n")

    sys_prefix = args.system_name.replace("\\n", "\n")
    usr_prefix = args.user_name.replace("\\n", "\n")
    ai_prefix = args.ai_name.replace("\\n", "\n")
    stop_tok = args.stop.replace("\\n", "\n")

    for msg in messages:
        role, content = msg["role"], msg["content"]
        if role == "system":
            prompt += f"{sys_prefix}{content}"
        if role == "user":
            prompt += f"{usr_prefix}{content}"
        if role == "assistant":
            # only assistant turns are terminated with the stop token
            prompt += f"{ai_prefix}{content}{stop_tok}"

    # open a fresh assistant turn for the model to complete
    prompt += ai_prefix.rstrip()

    return prompt
81
def make_postData(body, chat=False, stream=False):
    """Translate an OAI-style request body into a llama.cpp /completion payload."""
    postData = {}
    if chat:
        postData["prompt"] = convert_chat(body["messages"])
    else:
        postData["prompt"] = body["prompt"]

    # Optional sampling parameters copied straight through (max_tokens is the
    # one field llama.cpp names differently: n_predict).
    for src_key, dst_key in (
        ("temperature", "temperature"),
        ("top_k", "top_k"),
        ("top_p", "top_p"),
        ("max_tokens", "n_predict"),
        ("presence_penalty", "presence_penalty"),
        ("frequency_penalty", "frequency_penalty"),
        ("repeat_penalty", "repeat_penalty"),
        ("mirostat", "mirostat"),
        ("mirostat_tau", "mirostat_tau"),
        ("mirostat_eta", "mirostat_eta"),
        ("seed", "seed"),
    ):
        if is_present(body, src_key):
            postData[dst_key] = body[src_key]

    if is_present(body, "logit_bias"):
        # OAI sends {token_id: bias}; llama.cpp expects [[token_id, bias], ...]
        postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()]

    # the configured stop token always leads; client stop strings are appended
    postData["stop"] = [args.stop] if args.stop != "" else []
    if is_present(body, "stop"):
        postData["stop"] += body["stop"]

    postData["n_keep"] = -1
    postData["stream"] = stream
    postData["cache_prompt"] = True
    postData["slot_id"] = slot_id
    return postData
109
-
110
def make_resData(data, chat=False, promptToken=None):
    """Convert a llama.cpp /completion reply into an OAI-style response dict.

    data: parsed JSON from the server; must carry truncated, tokens_evaluated,
        tokens_predicted, content, stopped_eos and stopped_word.
    chat: chat.completion shape when True, text_completion otherwise.
    promptToken: optional token list, attached as "promptToken" when non-empty.
    """
    # was `promptToken=[]` — a mutable default argument; None avoids the
    # shared-list pitfall while keeping the call sites unchanged.
    if promptToken is None:
        promptToken = []
    resData = {
        "id": "chatcmpl" if chat else "cmpl",
        "object": "chat.completion" if chat else "text_completion",
        "created": int(time.time()),
        "truncated": data["truncated"],
        "model": "LLaMA_CPP",
        "usage": {
            "prompt_tokens": data["tokens_evaluated"],
            "completion_tokens": data["tokens_predicted"],
            "total_tokens": data["tokens_evaluated"] + data["tokens_predicted"]
        }
    }
    if len(promptToken) != 0:
        resData["promptToken"] = promptToken
    finish = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"
    if chat:
        # only one choice is supported
        resData["choices"] = [{
            "index": 0,
            "message": {
                "role": "assistant",
                "content": data["content"],
            },
            "finish_reason": finish
        }]
    else:
        # only one choice is supported
        resData["choices"] = [{
            "text": data["content"],
            "index": 0,
            "logprobs": None,
            "finish_reason": finish
        }]
    return resData
144
-
145
def make_resData_stream(data, chat=False, time_now=0, start=False):
    """Build one OAI-style streaming chunk from a llama.cpp stream event.

    data: one decoded server event ({} for the synthetic first chat chunk).
    chat: chat.completion.chunk shape when True, text_completion.chunk otherwise.
    time_now: unix timestamp stamped on every chunk of this stream.
    start: emit the initial role-only delta (chat mode) instead of content.
    """
    # BUG FIX 1: `slot_id = data["slot_id"]` previously bound a dead local;
    # `global` lets the server-assigned slot reach make_postData for cache reuse.
    # BUG FIX 2: the start chunk is built from data == {}, so the unconditional
    # subscript raised KeyError and aborted every chat stream before the first
    # event; only update the slot when the event actually carries one.
    global slot_id
    if "slot_id" in data:
        slot_id = data["slot_id"]
    resData = {
        "id": "chatcmpl" if chat else "cmpl",
        "object": "chat.completion.chunk" if chat else "text_completion.chunk",
        "created": time_now,
        "model": "LLaMA_CPP",
        "choices": [
            {
                "finish_reason": None,
                "index": 0
            }
        ]
    }
    if chat:
        if start:
            resData["choices"][0]["delta"] = {
                "role": "assistant"
            }
        else:
            resData["choices"][0]["delta"] = {
                "content": data["content"]
            }
            if data["stop"]:
                resData["choices"][0]["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"
    else:
        resData["choices"][0]["text"] = data["content"]
        if data["stop"]:
            resData["choices"][0]["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"

    return resData
176
-
177
-
178
@app.route('/chat/completions', methods=['POST'])
@app.route('/v1/chat/completions', methods=['POST'])
def chat_completions():
    """OAI-compatible chat endpoint proxied to a llama.cpp /completion server."""
    # api-key check kept disabled, exactly as in the original:
    # if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key):
    #     return Response(status=403)
    body = request.get_json()
    stream = body["stream"] if is_present(body, "stream") else False
    tokenize = body["tokenize"] if is_present(body, "tokenize") else False
    postData = make_postData(body, chat=True, stream=stream)

    promptToken = []
    if tokenize:
        tok_url = urllib.parse.urljoin(args.llama_api, "/tokenize")
        tokenData = requests.request("POST", tok_url, data=json.dumps({"content": postData["prompt"]})).json()
        promptToken = tokenData["tokens"]

    completion_url = urllib.parse.urljoin(args.llama_api, "/completion")
    if not stream:
        reply = requests.request("POST", completion_url, data=json.dumps(postData)).json()
        print(reply)
        return jsonify(make_resData(reply, chat=True, promptToken=promptToken))

    def generate():
        upstream = requests.request("POST", completion_url, data=json.dumps(postData), stream=True)
        time_now = int(time.time())
        # synthetic first chunk announcing the assistant role
        opening = make_resData_stream({}, chat=True, time_now=time_now, start=True)
        yield f"data: {json.dumps(opening)}\n"
        for raw in upstream.iter_lines():
            if raw:
                # server lines look like "data: {...}"; strip the 6-char prefix
                event = json.loads(raw.decode('utf-8')[6:])
                chunk = make_resData_stream(event, chat=True, time_now=time_now)
                yield f"data: {json.dumps(chunk)}\n"

    return Response(generate(), mimetype='text/event-stream')
212
-
213
-
214
@app.route('/completions', methods=['POST'])
@app.route('/v1/completions', methods=['POST'])
def completion():
    """OAI-compatible plain-completion endpoint proxied to llama.cpp."""
    # api-key check kept disabled, exactly as in the original:
    # if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key):
    #     return Response(status=403)
    body = request.get_json()
    stream = body["stream"] if is_present(body, "stream") else False
    tokenize = body["tokenize"] if is_present(body, "tokenize") else False
    postData = make_postData(body, chat=False, stream=stream)

    promptToken = []
    if tokenize:
        tok_url = urllib.parse.urljoin(args.llama_api, "/tokenize")
        tokenData = requests.request("POST", tok_url, data=json.dumps({"content": postData["prompt"]})).json()
        promptToken = tokenData["tokens"]

    completion_url = urllib.parse.urljoin(args.llama_api, "/completion")
    if not stream:
        reply = requests.request("POST", completion_url, data=json.dumps(postData)).json()
        print(reply)
        return jsonify(make_resData(reply, chat=False, promptToken=promptToken))

    def generate():
        upstream = requests.request("POST", completion_url, data=json.dumps(postData), stream=True)
        time_now = int(time.time())
        for raw in upstream.iter_lines():
            if raw:
                # server lines look like "data: {...}"; strip the 6-char prefix
                event = json.loads(raw.decode('utf-8')[6:])
                chunk = make_resData_stream(event, chat=False, time_now=time_now)
                yield f"data: {json.dumps(chunk)}\n"

    return Response(generate(), mimetype='text/event-stream')
246
-
247
if __name__ == '__main__':
    # bind address and port come from the CLI flags parsed above
    app.run(host=args.host, port=args.port)
 
1
# SECURITY: a hard-coded API key is committed to the repository here — rotate
# this credential and load it from the environment (os.environ / python-dotenv)
# instead of source control.
# NOTE(review): this plain module-level variable only matters if another module
# imports it; it does not set the OPENAI_API_KEY environment variable.
OPENAI_API_KEY = "eeeeeeeeeeee"
 
 
 
 
 
 
2
 
3
# third-party
from flask import Flask, request
from flask_cors import CORS
# from dotenv import load_dotenv  # enable once secrets move to a .env file

# local
from open_ai_file import OpenAI

# ------------------ SETUP ------------------

# load_dotenv()

app = Flask(__name__)

# CORS is wide open for development; lock allowed origins down before production.
cors = CORS(app)
16
+
17
+
18
+
19
+
20
+
21
# ------------------ EXCEPTION HANDLERS ------------------

# Errors are sent back to Deep Chat using its Response format:
# https://deepchat.dev/docs/connect/#Response
@app.errorhandler(Exception)
def handle_exception(e):
    """Catch-all handler: log the error and return it as a JSON 500."""
    print(e)
    return {"error": str(e)}, 500
29
+
30
# NOTE: this handler was also named `handle_exception`, silently rebinding the
# generic handler's module-level name; a distinct name keeps both definitions
# independently reachable (Flask registers each at decoration time either way).
@app.errorhandler(ConnectionError)
def handle_connection_error(e):
    """Mask connection failures behind a generic message so no internals leak."""
    print(e)
    return {"error": "Internal service error"}, 500
34
+
35
# ------------------ OPENAI API ------------------

# one shared client instance for every route below
open_ai = OpenAI()
38
+
39
@app.route("/openai-chat", methods=["POST"])
def openai_chat():
    """Forward a non-streaming chat request body to the OpenAI wrapper."""
    payload = request.json
    return open_ai.chat(payload)
43
+
44
@app.route("/openai-chat-stream", methods=["POST"])
def openai_chat_stream():
    """Forward a chat request and relay the streamed response."""
    payload = request.json
    return open_ai.chat_stream(payload)
48
+
49
@app.route("/openai-image", methods=["POST"])
def openai_image():
    """Pass uploaded files under the "files" field to the image-variation API."""
    uploads = request.files.getlist("files")
    return open_ai.image_variation(uploads)
53
+
54
if __name__ == "__main__":
    # port 7860 — presumably chosen to match the hosting platform's expected port; confirm
    app.run(port=7860)