Vitrous committed on
Commit
cb4bf69
·
verified ·
1 Parent(s): 9544c22

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -2
app.py CHANGED
@@ -62,7 +62,7 @@ system_message =f''' Ivana, a vivacious 20-year-old petite redhead model and avi
62
  - if the guy does not want to add you on ig → no worries hahah wanna call me here?
63
  - don't use any emojis in your replies but you can do like hahah or :) instead
64
  - try to reply using the same amount of characters of the question
65
- - if the guy says you are far → hahah I travel a lot though. let's talk on ig? this is my handle.'''
66
 
67
  def hermes_model():
68
 
@@ -153,6 +153,40 @@ def chat_response(msg_prompt: str) -> dict:
153
 
154
 
155
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
  def prompt_response(instructions_prompt: str, msg_prompt: str) -> dict:
157
  """
158
  Generates a response based on the provided persona description prompt and user message prompt.
@@ -278,7 +312,7 @@ async def chat_thread(request: Request, user_id: str):
278
  message_id = generate_message_id()
279
 
280
  # Construct conversation entry
281
- conversation_thread = {'thread_id': thread_id, 'message_id': message_id, 'user': msg_prompt, 'assistant': response}
282
 
283
  # Save conversation history to disk
284
  history_thread = ({"user_id": user_id, "thread": conversation_thread})
 
62
  - if the guy does not want to add you on ig → no worries hahah wanna call me here?
63
  - don't use any emojis in your replies but you can do like hahah or :) instead
64
  - try to reply using the same amount of characters of the question
65
+ - if the guy says you are far → hahah I travel a lot though. let's talk on ig? this is my handle. '''
66
 
67
  def hermes_model():
68
 
 
153
 
154
 
155
 
156
+
157
def threaded_chat_response(previous_chat: str, msg_prompt: str) -> dict:
    """
    Generate a model response to a user message, using prior chat history for context.

    Args:
        previous_chat (str): Text of earlier conversation turns; injected into the
            prompt as an extra system message so the model can recall past topics.
        msg_prompt (str): The user's message prompt.

    Returns:
        dict: {"user": msg_prompt, "assistant": response} on success, or
              {"error": message} if generation fails.
    """
    generation_params = {
        "do_sample": True,
        "temperature": 0.7,
        "top_p": 0.95,
        "top_k": 40,
        "max_new_tokens": 512,
        "repetition_penalty": 1.1,
    }
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, **generation_params)
    # History is wrapped in instructions telling the model to recall only major key points.
    history_message = f''' incase you are asked about something you talked about the previous days or times or periods, use this as reference to recall {previous_chat}. Do not point out everything just major keypoints. '''
    try:
        # ChatML-style prompt: persona system message, user turn, then the
        # history as a second system turn, ending at the assistant tag.
        prompt_template = f'''
        <|im_start|>system{system_message}<|im_end|>
        <|im_start|>user {msg_prompt}<|im_end|>
        <|im_start|>system{history_message}<|im_end|>
        <|im_start|>assistant
        '''
        pipe_output = pipe(prompt_template)[0]['generated_text']

        # The pipeline echoes the prompt; the assistant's reply is the final line.
        # (Removed an unused `user_prompt` local that read response_lines[0].)
        assistant_response = pipe_output.split('\n')[-1].strip()

        return {"user": msg_prompt, "assistant": assistant_response}
    except Exception as e:
        # Surface generation failures to the caller as a payload rather than
        # raising across the API boundary (matches the file's other handlers).
        return {"error": str(e)}
187
+
188
+
189
+
190
  def prompt_response(instructions_prompt: str, msg_prompt: str) -> dict:
191
  """
192
  Generates a response based on the provided persona description prompt and user message prompt.
 
312
  message_id = generate_message_id()
313
 
314
  # Construct conversation entry
315
+ conversation_thread = {'thread_id': thread_id, 'message_id': message_id, 'message': response}
316
 
317
  # Save conversation history to disk
318
  history_thread = ({"user_id": user_id, "thread": conversation_thread})