Update app.py
app.py
CHANGED
@@ -105,22 +105,6 @@ def generate_message_id():
     return generate_id()
 
 
-def save_conversation(user_id, conversation):
-    """
-    Save conversation history to disk.
-
-    Args:
-        user_id (str): The unique identifier for the user.
-        conversation (dict): The conversation data.
-        hf_space_path (str): The path to the Hugging Face Space.
-    """
-
-    # Add the new conversation to the dataset
-    dataset = dataset.add({"user_id": user_id, "conversation": conversation})
-
-    # Save the updated dataset back to the JSON file
-    dataset.save_to_disk("articko/conversations/conversations.jsonl")
-
 def chat_response(msg_prompt: str) -> dict:
     """
     Generates a response from the model given a prompt.
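The removed save_conversation helper appears to have been dead code: it assigns to a dataset object the function never receives or creates, and datasets' save_to_disk writes an Arrow directory rather than the .jsonl file its path suggests. If on-disk persistence is still wanted, a minimal sketch that appends one JSON record per line (the helper name and path here are illustrative, not from the repo):

import json

def append_conversation(user_id: str, conversation: dict, path: str = "conversations.jsonl") -> None:
    # JSONL: one self-contained JSON object per line, appended without
    # rewriting the rest of the file.
    with open(path, "a", encoding="utf-8") as f:
        f.write(json.dumps({"user_id": user_id, "conversation": conversation}) + "\n")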
@@ -154,9 +138,9 @@ def chat_response(msg_prompt: str) -> dict:
 
 
 
-def threaded_chat_response(previous_chat: str, msg_prompt: str) -> dict:
+def thread_response(msg_history: str, msg_prompt: str) -> dict:
     """
-    Generates a response from the model
+    Generates a response from the model using the system prompt written above.
 
     Args:
         msg_prompt (str): The user's message prompt.
@@ -164,14 +148,18 @@ def threaded_chat_response(previous_chat: str, msg_prompt: str) -> dict:
     Returns:
         dict: A dictionary containing the user's message prompt and the model's response.
     """
+    # Validate input parameters
+    if not msg_prompt:
+        raise ValueError("Message prompt cannot be empty.")
+
     generation_params = {"do_sample": True, "temperature": 0.7, "top_p": 0.95, "top_k": 40, "max_new_tokens": 512, "repetition_penalty": 1.1}
     pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, **generation_params)
-
+    message_history = f'''In case you are asked about something from previous days or conversations, use this as a reference to recall it: {msg_history}. Do not bring up everything, just the major key points. If nothing about past conversations is asked, ignore this.'''
 
-    if previous_chat:
+    if msg_history:
 
         prompt_template = f'''
-<|im_start|>system{system_message}{
+<|im_start|>system{system_message}{message_history}<|im_end|>
 <|im_start|>user {msg_prompt}<|im_end|>
 <|im_start|>assistant
 '''
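thread_response builds its ChatML prompt by hand, and the removed line above shows how easily a closing <|im_end|> gets dropped that way. Where the loaded tokenizer ships a chat template, the same prompt can be produced more robustly. A sketch, assuming the tokenizer, system_message, message_history, and msg_prompt names already defined in app.py:

messages = [
    {"role": "system", "content": system_message + message_history},
    {"role": "user", "content": msg_prompt},
]
# Render the conversation with the tokenizer's own template instead of
# hand-concatenating <|im_start|>/<|im_end|> markers.
prompt_template = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)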
@@ -181,8 +169,6 @@ def threaded_chat_response(previous_chat: str, msg_prompt: str) -> dict:
 <|im_start|>user {msg_prompt}<|im_end|>
 <|im_start|>assistant
 '''
-
-
     try:
 
         pipe_output = pipe(prompt_template)[0]['generated_text']
@@ -198,7 +184,7 @@ def threaded_chat_response(previous_chat: str, msg_prompt: str) -> dict:
 
 
 
-def prompt_response(instructions_prompt: str, msg_prompt: str) -> dict:
+def chat_thread_response(ai_persona: str, msg_history: str, msg_prompt: str) -> dict:
     """
     Generates a response based on the provided persona description prompt and user message prompt.
 
@@ -209,31 +195,29 @@ def prompt_response(instructions_prompt: str, msg_prompt: str) -> dict:
     Returns:
         dict: A dictionary containing the user's msg_prompt and the model's response.
     """
-
-
-
-
-
-    generation_params = {
-        "do_sample": True,
-        "temperature": 0.7,
-        "top_p": 0.95,
-        "top_k": 40,
-        "max_new_tokens": 512,
-        "repetition_penalty": 1.1
-    }
-
-    # Create a pipeline for text generation
-    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, **generation_params)
+
+    # Validate input parameters
+    if not msg_prompt:
+        raise ValueError("Message prompt cannot be empty.")
+
+    generation_params = {"do_sample": True, "temperature": 0.7, "top_p": 0.95, "top_k": 40, "max_new_tokens": 512, "repetition_penalty": 1.1}
+    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, **generation_params)
+    message_history = f'''In case you are asked about something from previous days or conversations, use this as a reference to recall it: {msg_history}. Do not bring up everything, just the major key points. If nothing about past conversations is asked, ignore this.'''
+
+    if msg_history:
 
-    prompt_template = f'''
-    {
-    <|im_start|>user
-    {msg_prompt}<|im_end|>
+        prompt_template = f'''
+<|im_start|>system{ai_persona}{message_history}<|im_end|>
+<|im_start|>user {msg_prompt}<|im_end|>
 <|im_start|>assistant
 '''
+    else:
+        prompt_template = f'''
+<|im_start|>system{system_message}<|im_end|>
+<|im_start|>user {msg_prompt}<|im_end|>
+<|im_start|>assistant
+'''
+    try:
         # Generate response using the pipeline
         pipe_output = pipe(prompt_template)[0]['generated_text']
 
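In both helpers, pipe(...)[0]['generated_text'] contains the prompt as well as the completion, since text-generation pipelines echo their input by default. A sketch of isolating the assistant turn, using the variable names from app.py:

pipe_output = pipe(prompt_template)[0]['generated_text']
# Everything up to len(prompt_template) is the echoed prompt; the rest is
# the newly generated assistant turn.
assistant_reply = pipe_output[len(prompt_template):].strip()
# Equivalently, pass return_full_text=False to the pipeline call to receive
# only the completion.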
@@ -268,35 +252,8 @@ async def api_home():
     """
     return HTMLResponse(content=html_content, status_code=200)
 
-@app.post('/
-async def
-    """
-    Starts a new conversation thread with a provided prompt.
-
-    Args:
-        request (Request): The HTTP request object containing the user prompt.
-
-    Returns:
-        dict: The response generated by the model.
-    """
-    try:
-
-        data = await request.json()
-        msg_prompt = data.get('msg_prompt')
-
-        if not msg_prompt:
-            raise HTTPException(status_code=400, detail="No prompt provided")
-        response = chat_response(msg_prompt)
-        thread_id = len(conversations) + 1
-        conversations[thread_id] = {'prompt': msg_prompt, 'responses': [response]}
-        return {'thread_id': thread_id, 'response': response}
-    except HTTPException:
-        raise
-    except Exception as e:
-        raise HTTPException(status_code=500, detail=str(e))
-
-@app.post('/chat_thread/{user_id}')
-async def chat_thread(request: Request, user_id: str):
+@app.post('/thread/{user_id}')
+async def thread(request: Request, user_id: str):
     """
     Starts a new conversation thread with a provided prompt for a specific user.
 
@@ -312,13 +269,13 @@ async def chat_thread(request: Request, user_id: str):
 
         data = await request.json()
         msg_prompt = data.get('msg_prompt')
-
+        msg_history = data.get('msg_history')
 
         if not msg_prompt:
             raise HTTPException(status_code=400, detail="Prompt not provided")
 
         # Generate response
-        response =
+        response = thread_response(msg_history, msg_prompt)
 
         # Generate message ID
         message_id = generate_message_id()
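A hypothetical client call against the new endpoint, with an illustrative host and payload (the {'response': {...}} shape follows from the return statement in the next hunk):

import requests

resp = requests.post(
    "http://localhost:8000/thread/user-123",  # illustrative URL and user id
    json={
        "msg_prompt": "What did we agree on yesterday?",
        "msg_history": "Yesterday we agreed to ship on Friday.",
    },
)
print(resp.json())  # {'response': {'user_id': 'user-123', 'thread': {...}}}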
@@ -327,18 +284,19 @@ async def chat_thread(request: Request, user_id: str):
         conversation_thread = {'thread_id': thread_id, 'message_id': message_id, 'message': response}
 
         # Save conversation history to disk
-
+        thread_history = {"user_id": user_id, "thread": conversation_thread}
 
-
         # Return response and thread ID
-        return {'response':
+        return {'response': thread_history}
     except HTTPException as e:
         raise e
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))
 
-
-
+
+@app.post('/chat_thread/{user_id}')
+async def chat_thread(request: Request, user_id: str):
     """
     Starts a new chat thread with a provided user message prompt and persona description of the AI assistant.
 
@@ -349,40 +307,31 @@ async def prompted_chat(request: Request):
         dict: The thread ID and the response generated by the model.
     """
     try:
+        thread_id = generate_thread_id()
+
         data = await request.json()
+        ai_persona = data.get('ai_persona')
         msg_prompt = data.get('msg_prompt')
-
-        if not msg_prompt or not persona_desc:
-            raise HTTPException(status_code=400, detail="Both prompt and person_description are required")
-
-        response = prompt_response(persona_desc, msg_prompt)
+        msg_history = data.get('msg_history')
 
-        return {'thread_id': thread_id, 'response': response}
-    except HTTPException:
-        raise
-    except Exception as e:
-        raise HTTPException(status_code=500, detail=str(e))
+        if not msg_prompt:
+            raise HTTPException(status_code=400, detail="Prompt not provided")
 
+        # Generate response
+        response = chat_thread_response(ai_persona, msg_history, msg_prompt)
+
+        # Generate message ID
+        message_id = generate_message_id()
+
+        # Construct conversation entry
+        conversation_thread = {'thread_id': thread_id, 'message_id': message_id, 'message': response}
+
+        # Save conversation history to disk
+        thread_history = {"user_id": user_id, "thread": conversation_thread}
+        # Return response and thread ID
+        return {'response': thread_history}
+    except HTTPException as e:
+        raise e
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
 
-
-
-    """
-    Retrieves the response of a conversation thread by its ID.
-
-    Args:
-        thread_id (int): The ID of the conversation thread.
-
-    Returns:
-        dict: The response of the conversation thread.
-    """
-    if thread_id not in conversations:
-        raise HTTPException(status_code=404, detail="Thread not found")
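And likewise for /chat_thread/{user_id}. Note that as written, ai_persona replaces system_message only when msg_history is non-empty; a persona-only call falls back to the default system prompt. Illustrative values:

import requests

resp = requests.post(
    "http://localhost:8000/chat_thread/user-123",  # illustrative URL and user id
    json={
        "ai_persona": "You are a terse, nautical-themed assistant.",
        "msg_prompt": "Where did we leave off?",
        "msg_history": "Last time we outlined the first three chapters.",
    },
)
print(resp.json())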