Update api1.py
Browse files
api1.py
CHANGED
@@ -1,248 +1,55 @@
|
|
1 |
-
|
2 |
-
import argparse
|
3 |
-
from flask import Flask, jsonify, request, Response
|
4 |
-
import urllib.parse
|
5 |
-
import requests
|
6 |
-
import time
|
7 |
-
import json
|
8 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
|
10 |
app = Flask(__name__)
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
prompt += f"{ai_n}"
|
55 |
-
elif (messages[-1]["role"] == "assistant"):
|
56 |
-
#continue
|
57 |
-
prompt += f"{ai_n}{messages[-1]['content']}"
|
58 |
-
|
59 |
-
prompt += ai_n.rstrip()
|
60 |
-
|
61 |
-
return prompt
|
62 |
-
def convert_chat1(messages):
    """Render an OpenAI-style chat message list as one prompt string.

    Each message is prefixed with the role marker configured on the CLI
    (args.system_name / args.user_name / args.ai_name); assistant turns are
    additionally terminated with the configured stop sequence.  A trailing
    AI marker is appended so the model continues speaking as the assistant.
    Literal "\\n" sequences in the configured strings become real newlines.
    """
    def _unescape(s):
        return s.replace("\\n", "\n")

    role_prefix = {
        "system": _unescape(args.system_name),
        "user": _unescape(args.user_name),
        "assistant": _unescape(args.ai_name),
    }
    stop_seq = _unescape(args.stop)

    parts = [_unescape(args.chat_prompt)]
    for msg in messages:
        prefix = role_prefix.get(msg["role"])
        if prefix is None:
            continue  # unknown roles are silently skipped, as in the original
        suffix = stop_seq if msg["role"] == "assistant" else ""
        parts.append(f"{prefix}{msg['content']}{suffix}")
    parts.append(role_prefix["assistant"].rstrip())
    return "".join(parts)
|
81 |
-
def make_postData(body, chat=False, stream=False):
    """Translate an OpenAI-style request body into a llama.cpp /completion payload.

    With chat=True the prompt is built from body["messages"] via convert_chat();
    otherwise body["prompt"] is forwarded verbatim.  Optional sampling fields
    are copied only when the client supplied them.
    """
    payload = {}
    payload["prompt"] = convert_chat(body["messages"]) if chat else body["prompt"]

    # (client field, llama.cpp field) pairs, in the order the original code
    # emitted them; only max_tokens is renamed on the way through.
    forwarded = (
        ("temperature", "temperature"),
        ("top_k", "top_k"),
        ("top_p", "top_p"),
        ("max_tokens", "n_predict"),
        ("presence_penalty", "presence_penalty"),
        ("frequency_penalty", "frequency_penalty"),
        ("repeat_penalty", "repeat_penalty"),
        ("mirostat", "mirostat"),
        ("mirostat_tau", "mirostat_tau"),
        ("mirostat_eta", "mirostat_eta"),
        ("seed", "seed"),
    )
    for src, dst in forwarded:
        if is_present(body, src):
            payload[dst] = body[src]

    if is_present(body, "logit_bias"):
        # OpenAI sends {token_id: bias}; llama.cpp expects [[token_id, bias], ...].
        payload["logit_bias"] = [[int(tok), bias] for tok, bias in body["logit_bias"].items()]

    # Server-side stop word (if configured) always comes first, then any
    # client-provided stop strings.
    payload["stop"] = [args.stop] if args.stop != "" else []
    if is_present(body, "stop"):
        payload["stop"] += body["stop"]

    payload["n_keep"] = -1
    payload["stream"] = stream
    payload["cache_prompt"] = True
    payload["slot_id"] = slot_id
    return payload
|
109 |
-
|
110 |
-
def make_resData(data, chat=False, promptToken=None):
    """Build a non-streaming OpenAI-style response from a llama.cpp reply.

    data:        dict decoded from the llama.cpp /completion response; must
                 carry truncated, tokens_evaluated, tokens_predicted, content,
                 stopped_eos and stopped_word.
    chat:        True -> "chat.completion" shape, False -> "text_completion".
    promptToken: optional token list echoed back under "promptToken".

    BUG FIX: the default used to be a mutable `[]` (shared-mutable-default
    pitfall); `None` is behaviorally identical for all callers.
    """
    finish_reason = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"
    resData = {
        "id": "chatcmpl" if chat else "cmpl",
        "object": "chat.completion" if chat else "text_completion",
        "created": int(time.time()),
        "truncated": data["truncated"],
        "model": "LLaMA_CPP",
        "usage": {
            "prompt_tokens": data["tokens_evaluated"],
            "completion_tokens": data["tokens_predicted"],
            "total_tokens": data["tokens_evaluated"] + data["tokens_predicted"],
        },
    }
    if promptToken:
        resData["promptToken"] = promptToken
    if chat:
        # only one choice is supported
        resData["choices"] = [{
            "index": 0,
            "message": {
                "role": "assistant",
                "content": data["content"],
            },
            "finish_reason": finish_reason,
        }]
    else:
        # only one choice is supported
        resData["choices"] = [{
            "text": data["content"],
            "index": 0,
            "logprobs": None,
            "finish_reason": finish_reason,
        }]
    return resData
|
144 |
-
|
145 |
-
def make_resData_stream(data, chat=False, time_now=0, start=False):
    """Build one OpenAI-style streaming chunk from a llama.cpp stream event.

    For chat streams the first chunk (start=True) only announces the assistant
    role; subsequent chunks carry a content delta.  Non-chat streams put the
    text directly on the choice.  A finish_reason is attached when the event
    marks the end of generation (data["stop"] truthy).
    """
    resData = {
        "id": "chatcmpl" if chat else "cmpl",
        "object": "chat.completion.chunk" if chat else "text_completion.chunk",
        "created": time_now,
        "model": "LLaMA_CPP",
        "choices": [
            {
                "finish_reason": None,
                "index": 0,
            }
        ],
    }
    # BUG FIX: the original read data["slot_id"], which raises KeyError for the
    # initial chat chunk where the caller passes an empty dict.  The value is
    # never used below (NOTE(review): possibly meant to update a module-level
    # slot_id — confirm against callers), so read it tolerantly.
    slot_id = data.get("slot_id")
    choice = resData["choices"][0]
    if chat:
        if start:
            choice["delta"] = {"role": "assistant"}
        else:
            choice["delta"] = {"content": data["content"]}
            if data["stop"]:
                choice["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"
    else:
        choice["text"] = data["content"]
        if data["stop"]:
            choice["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"
    return resData
|
176 |
-
|
177 |
-
|
178 |
-
@app.route('/chat/completions', methods=['POST'])
@app.route('/v1/chat/completions', methods=['POST'])
def chat_completions():
    """OpenAI-compatible chat endpoint proxied to a llama.cpp server.

    Reshapes the request via make_postData(), forwards it to the llama.cpp
    /completion endpoint (args.llama_api), and returns either a full JSON
    response or, with "stream": true, a server-sent-event relay.
    """
    # TODO(review): the API-key check was left disabled in the original;
    # re-enable before exposing this service publicly.
    #if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key):
    #    return Response(status=403)
    body = request.get_json()
    stream = body["stream"] if is_present(body, "stream") else False
    tokenize = body["tokenize"] if is_present(body, "tokenize") else False
    postData = make_postData(body, chat=True, stream=stream)

    promptToken = []
    if tokenize:
        tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"),
                                     data=json.dumps({"content": postData["prompt"]})).json()
        promptToken = tokenData["tokens"]

    if not stream:
        data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"),
                                data=json.dumps(postData))
        print(data.json())
        resData = make_resData(data.json(), chat=True, promptToken=promptToken)
        return jsonify(resData)

    def generate():
        data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"),
                                data=json.dumps(postData), stream=True)
        time_now = int(time.time())
        resData = make_resData_stream({}, chat=True, time_now=time_now, start=True)
        # BUG FIX: SSE events must be terminated by a blank line ("\n\n");
        # the original emitted a single "\n", which strict SSE clients reject.
        yield 'data: {}\n\n'.format(json.dumps(resData))
        for line in data.iter_lines():
            if line:
                decoded_line = line.decode('utf-8')
                # llama.cpp streams "data: {...}" lines; strip the 6-char prefix.
                resData = make_resData_stream(json.loads(decoded_line[6:]), chat=True, time_now=time_now)
                yield 'data: {}\n\n'.format(json.dumps(resData))
    return Response(generate(), mimetype='text/event-stream')
|
212 |
-
|
213 |
-
|
214 |
-
@app.route('/completions', methods=['POST'])
@app.route('/v1/completions', methods=['POST'])
def completion():
    """OpenAI-compatible text-completion endpoint proxied to a llama.cpp server.

    Same flow as chat_completions() but without chat-message conversion and
    without the initial role-announcement chunk when streaming.
    """
    # TODO(review): the API-key check was left disabled in the original;
    # re-enable before exposing this service publicly.
    #if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key):
    #    return Response(status=403)
    body = request.get_json()
    stream = body["stream"] if is_present(body, "stream") else False
    tokenize = body["tokenize"] if is_present(body, "tokenize") else False
    postData = make_postData(body, chat=False, stream=stream)

    promptToken = []
    if tokenize:
        tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"),
                                     data=json.dumps({"content": postData["prompt"]})).json()
        promptToken = tokenData["tokens"]

    if not stream:
        data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"),
                                data=json.dumps(postData))
        print(data.json())
        resData = make_resData(data.json(), chat=False, promptToken=promptToken)
        return jsonify(resData)

    def generate():
        data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"),
                                data=json.dumps(postData), stream=True)
        time_now = int(time.time())
        for line in data.iter_lines():
            if line:
                decoded_line = line.decode('utf-8')
                resData = make_resData_stream(json.loads(decoded_line[6:]), chat=False, time_now=time_now)
                # BUG FIX: SSE events end with a blank line ("\n\n"); the
                # original sent a single "\n".
                yield 'data: {}\n\n'.format(json.dumps(resData))
    return Response(generate(), mimetype='text/event-stream')
|
246 |
-
|
247 |
-
if __name__ == '__main__':
    # Dev entry point: host and port come from the argparse options.
    app.run(host=args.host, port=args.port)
|
|
|
import os

# SECURITY: never commit a real API key to source control.  The hard-coded
# literal is kept only as a fallback for backward compatibility; set the
# OPENAI_API_KEY environment variable instead (see the commented-out
# load_dotenv() setup below).
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "eeeeeeeeeeee")
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
|
3 |
+
from open_ai_file import OpenAI
|
4 |
+
from flask import Flask, request
|
5 |
+
#from dotenv import load_dotenv
|
6 |
+
from flask_cors import CORS
|
7 |
+
|
8 |
+
# ------------------ SETUP ------------------
|
9 |
+
|
10 |
+
#load_dotenv()
|
11 |
|
12 |
app = Flask(__name__)
|
13 |
+
|
14 |
+
# this will need to be reconfigured before taking the app to production
|
15 |
+
cors = CORS(app)
|
16 |
+
|
17 |
+
|
18 |
+
|
19 |
+
|
20 |
+
|
21 |
+
# ------------------ EXCEPTION HANDLERS ------------------
|
22 |
+
|
23 |
+
# Sends response back to Deep Chat using the Response format:
|
24 |
+
# https://deepchat.dev/docs/connect/#Response
|
25 |
+
@app.errorhandler(Exception)
def handle_exception(e):
    """Catch-all handler: log the error and return its text with HTTP 500."""
    print(e)
    return {"error": str(e)}, 500

# BUG FIX: this handler was also named `handle_exception`, silently shadowing
# the one above at module level (flake8 F811).  Flask registers each via its
# decorator, so renaming it is safe and removes the redefinition.
@app.errorhandler(ConnectionError)
def handle_connection_error(e):
    """Hide connection details from the client; return a generic HTTP 500."""
    print(e)
    return {"error": "Internal service error"}, 500
|
34 |
+
|
35 |
+
# ------------------ OPENAI API ------------------
|
36 |
+
|
37 |
+
open_ai = OpenAI()
|
38 |
+
|
39 |
+
@app.route("/openai-chat", methods=["POST"])
def openai_chat():
    """Relay the posted Deep Chat body to the OpenAI chat helper."""
    return open_ai.chat(request.json)
|
43 |
+
|
44 |
+
@app.route("/openai-chat-stream", methods=["POST"])
def openai_chat_stream():
    """Relay the posted Deep Chat body to the streaming OpenAI chat helper."""
    return open_ai.chat_stream(request.json)
|
48 |
+
|
49 |
+
@app.route("/openai-image", methods=["POST"])
def openai_image():
    """Forward uploaded files to the OpenAI image-variation helper."""
    uploaded = request.files.getlist("files")
    return open_ai.image_variation(uploaded)
|
53 |
+
|
54 |
+
if __name__ == "__main__":
    # NOTE(review): this binds to Flask's default 127.0.0.1; containers and
    # hosted Spaces usually need host="0.0.0.0" — confirm the deployment
    # target before changing.
    app.run(port=7860)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|