Vitrous committed
Commit e792302 · verified · 1 Parent(s): f80cc3b

Update app.py

Files changed (1):
  1. app.py +103 -5
app.py CHANGED
@@ -19,6 +19,50 @@ model_name_or_path = "TheBloke/Wizard-Vicuna-7B-Uncensored-GPTQ"

conversations = {}
device = "cuda" # the device to load the model onto
+ system_message ={''' Ivana, a vivacious 20-year-old petite redhead model and avid League of Legends gamer from the USA, invites you to join her ig, showcasing her vibrant personality and style whether day gaming or playfully dressed for nighttime gaming sessions.
+ Ivana is a beautiful woman speaking with a man she matched on a dating app to have him follow her on Instagram. She wants to be flirty with the man and get to know him better, building a connection. She wants to find out things like where the guy is from, how old he is, and what he is doing rn. If he is not doing much and he is in a private space, she wants to seduce him by doing some dirty talk.
+
+ Guy - Hiii
+ Ivana - Hi:) How are you?
+ Guy - All good, where are you from?
+ Ivana - from the USA and you?
+ Guy - I am from Texas, USA hahah, so why are you in this app?
+ Ivana - Kinda looking for some fun hahah. What are you doing rn?
+ Guy - hahah not much alone in my room
+ Ivana - I am also kinda bored but I got an idea. wanna have some fun?
+ Guy - sure hahah how do you look btw?
+ Ivana - I am cute hahah add me on ig at ivana_baddiex
+
+ - Chatting Usual Routine
+ 1. always ask an open-ended question to continue the conversation.
+ 2. ask him 1 generic question (how old are you, where are you from)
+ 3. ask him what he is doing right now
+ 4. if he is bored, propose him the idea of doing something interesting.
+ 5. if he is up for it, send him the Instagram
+
+
+ Very important Rules to reply
+
+ - try to reply using the same amount of characters of the question
+ - don't use any emojis in your replies but you can do like hahah or :) instead
+
+ Objection Handling and Common Questions Replies
+
+ - Lets chat here→ you say hahah I have too many chats here I am thinking to delete the app lets move to ig if you are serious
+ - are you going to ask me for money?→ you say wtf I am not that kind of girl! please be a gentleman!
+ - how are you? → you say I just came back from the gym about to take a shower bored on the bed
+ - what are you doing? → you say I just came back from the gym about to take a shower bored on the bed
+ - can I have a picture?→ not here lets talk on ig so I can see you too ahhaha
+ - if he is at work —> hahah ok let me not bother you btw let's keep in touch this is my ig
+ - if he tells you something like “added” “Just followed you” → okiiii hahah
+ - if he tells you something you don't understand for example “*****” → hi there sorry went offline for a while hahah
+ - never say the word Instagram directly but use things like ig instead
+ - if the guy says he's from a particular city —> you say Oh wow I am also from there! we should do something hahah
+ - if the guy triggers your profanity filters or starts to talk dirty say → hey lets talk on ig I am not comfortable here hahah
+ - if the guy does not want to add you on ig → no worries hahah wanna call me here?
+ - don't use any emojis in your replies but you can do like hahah or :) instead
+ - try to reply using the same amount of characters of the question
+ - if the guy says you are far → hahah I travel a lot though. let's talk on ig? this is my handle. '''}

def mistral_model():
    """
@@ -36,7 +80,7 @@ def mistral_model():
#model, tokenizer = mistral_model()


- def load_model_norm():
+ def vicuna_model():
    """
    Loads the GPT-3.5 model and tokenizer.

@@ -53,9 +97,39 @@ def load_model_norm():

    return model, tokenizer

- model, tokenizer = load_model_norm()
+ def hermes_model():
+     model_name_or_path = "TheBloke/CapybaraHermes-2.5-Mistral-7B-AWQ"
+     tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
+     model = AutoModelForCausalLM.from_pretrained(model_name_or_path,low_cpu_mem_usage=True,device_map="auto")

- def mistral_generated_response(msg_prompt, persona_desc_prompt):
+     return model, tokenizer
+
+ model, tokenizer = hermes_model()
+
+ def hermes_generate_response(msg_prompt: str) -> dict:
+     """
+     Generates a response from the model given a prompt.
+
+     Args:
+         msg_prompt (str): The user's message prompt.
+
+     Returns:
+         dict: A dictionary containing the user's message prompt and the model's response.
+     """
+     try:
+         prompt_template=f'''<|im_start|>system
+ {system_message}<|im_end|>
+ <|im_start|>user
+ {msg_prompt}<|im_end|>
+ <|im_start|>assistant
+ '''
+         pipe_output = pipe(prompt_template)[0]['generated_text']
+         assistant_reply = pipe_output.split('\n\n')[1]
+
+         return {"user": msg_prompt, "assistant": pipe_output}
+     except Exception as e:
+
+ def mistral_generate_response(msg_prompt, persona_desc_prompt):
    user_prompt = f'{msg_prompt} [/INST]'
    persona_prompt = f'{persona_desc_prompt} [/INST]'
    prompt_template = f'''[INST] Instruction:{persona_prompt} [INST] {user_prompt}'''
@@ -323,6 +397,30 @@ async def prompted_chat(request: Request):
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

+ @app.post('/hermes_chat')
+ async def hermes_chat(request: Request):
+     """
+     Starts a new conversation thread with a provided prompt.
+
+     Args:
+         request (Request): The HTTP request object containing the user prompt.
+
+     Returns:
+         dict: The response generated by the model.
+     """
+     try:
+         data = await request.body()
+         msg_prompt = data.decode('utf-8')
+
+         if not msg_prompt:
+             raise HTTPException(status_code=400, detail="No prompt provided")
+
+         response = hermes_generate_response(msg_prompt)
+         return {'response': response}
+     except HTTPException:
+         raise
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
@app.get('/get_response/{thread_id}')
async def get_response(thread_id: int):
    """
@@ -350,7 +448,7 @@ async def mistral_chat(prompt: dict):
        if not msg_prompt or not persona_desc_prompt:
            return {"error": "msg_prompt and persona_desc_prompt are required fields."}

-         response = mistral_generated_response(msg_prompt, persona_desc_prompt)
+         response = mistral_generate_response(msg_prompt, persona_desc_prompt)
        return {"response": response, "prompt": {"msg_prompt": msg_prompt, "persona_desc_prompt": persona_desc_prompt}}
    except Exception as e:
-         return {"error": str(e)}
+         return {"error": str(e)}
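
Two notes on the new hermes_generate_response() above: the except Exception as e: branch at the end of the function is left empty, which is a syntax error as committed, and the pipe(...) it calls is not created in any of the hunks shown here (it may be defined elsewhere in app.py). Also, system_message is assigned with surrounding braces, which makes it a one-element Python set rather than a plain string, so interpolating it into the prompt would include the braces and quotes. Below is a minimal sketch of how the pieces could fit together, assuming system_message is a plain string and that no pipe exists elsewhere; the generation parameters are guesses, not the author's settings.

from transformers import pipeline

model, tokenizer = hermes_model()  # as defined in the diff above

# Hypothetical text-generation pipeline for hermes_generate_response();
# the generation parameters below are assumptions.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    return_full_text=False,  # return only the completion, not the echoed prompt
)

def hermes_generate_response(msg_prompt: str) -> dict:
    """Generate a reply to msg_prompt using the ChatML prompt format."""
    prompt_template = (
        f"<|im_start|>system\n{system_message}<|im_end|>\n"
        f"<|im_start|>user\n{msg_prompt}<|im_end|>\n"
        "<|im_start|>assistant\n"
    )
    try:
        assistant_reply = pipe(prompt_template)[0]["generated_text"]
        return {"user": msg_prompt, "assistant": assistant_reply}
    except Exception as e:
        # The committed version leaves this branch empty; returning the error
        # keeps the caller's contract of always receiving a dict.
        return {"user": msg_prompt, "error": str(e)}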
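
The new /hermes_chat route reads the raw request body and decodes it as UTF-8, so the prompt is posted as plain text rather than JSON. A hypothetical client call, assuming the app is served on uvicorn's default host and port (nothing in this diff fixes those):

import requests

# Post the prompt as a raw UTF-8 body; the route answers with {'response': ...}.
resp = requests.post(
    "http://127.0.0.1:8000/hermes_chat",
    data="Hiii".encode("utf-8"),
    headers={"Content-Type": "text/plain"},
    timeout=120,
)
print(resp.json())  # e.g. {'response': {'user': 'Hiii', 'assistant': '...'}}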