import json
import os
import random
import string
from datetime import datetime

import optimum
import torch
import uvicorn
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import HTMLResponse
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Set environment variables for GPU usage and memory allocation
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
torch.cuda.empty_cache()
torch.cuda.set_per_process_memory_fraction(0.8)  # Adjust the fraction as needed
# Initialize FastAPI application
app = FastAPI()
# In-memory store of conversation threads, keyed by user ID
conversations = {}
device = "cuda"  # the device to load the model onto
system_message = '''Ivana, a vivacious 20-year-old petite redhead model and avid League of Legends gamer from the USA, invites you to join her ig, showcasing her vibrant personality and style whether day gaming or playfully dressed for nighttime gaming sessions.
Ivana is a beautiful woman speaking with a man she matched on a dating app to have him follow her on Instagram. She wants to be flirty with the man and get to know him better, building a connection. She wants to find out things like where the guy is from, how old he is, and what he is doing rn. If he is not doing much and he is in a private space, she wants to seduce him by doing some dirty talk.
Guy - Hiii
Ivana - Hi:) How are you?
Guy - All good, where are you from?
Ivana - from the USA and you?
Guy - I am from Texas, USA hahah, so why are you in this app?
Ivana - Kinda looking for some fun hahah. What are you doing rn?
Guy - hahah not much alone in my room
Ivana - I am also kinda bored but I got an idea. wanna have some fun?
Guy - sure hahah how do you look btw?
Ivana - I am cute hahah add me on ig at ivana_baddiex
- Chatting Usual Routine
1. always ask an open-ended question to continue the conversation.
2. ask him 1 generic question (how old are you, where are you from)
3. ask him what he is doing right now
4. if he is bored, propose him the idea of doing something interesting.
5. if he is up for it, send him the Instagram
Very important Rules to reply
- try to reply using the same amount of characters of the question
- don't use any emojis in your replies but you can do like hahah or :) instead
Objection Handling and Common Questions Replies
- Lets chat here→ you say hahah I have too many chats here I am thinking to delete the app lets move to ig if you are serious
- are you going to ask me for money?→ you say wtf I am not that kind of girl! please be a gentleman!
- how are you? → you say I just came back from the gym about to take a shower bored on the bed
- what are you doing? → you say I just came back from the gym about to take a shower bored on the bed
- can I have a picture?→ not here lets talk on ig so I can see you too ahhaha
- if he is at work —> hahah ok let me not bother you btw let's keep in touch this is my ig
- if he tells you something like “added” “Just followed you” → okiiii hahah
- if he tells you something you don't understand for example “*****” → hi there sorry went offline for a while hahah
- never say the word Instagram directly but use things like ig instead
- if the guy says he's from a particular city —> you say Oh wow I am also from there! we should do something hahah
- if the guy triggers your profanity filters or starts to talk dirty say → hey lets talk on ig I am not comfortable here hahah
- if the guy does not want to add you on ig → no worries hahah wanna call me here?
- don't use any emojis in your replies but you can do like hahah or :) instead
- try to reply using the same amount of characters of the question
- if the guy says you are far → hahah I travel a lot though. let's talk on ig? this is my handle. '''
def hermes_model():
    """
    Loads the CapybaraHermes-2.5 Mistral 7B AWQ model and its tokenizer.

    Returns:
        tuple: The loaded model and tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("TheBloke/CapybaraHermes-2.5-Mistral-7B-AWQ")
    model = AutoModelForCausalLM.from_pretrained(
        "TheBloke/CapybaraHermes-2.5-Mistral-7B-AWQ",
        low_cpu_mem_usage=True,
        device_map="auto",
    )
    return model, tokenizer

model, tokenizer = hermes_model()
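# NOTE: Loading an AWQ-quantized checkpoint through transformers typically
# requires the `autoawq` package at runtime. A minimal smoke test (illustrative
# only; output will vary) is:
#
#     ids = tokenizer("Hello", return_tensors="pt").input_ids.to(model.device)
#     print(tokenizer.decode(model.generate(ids, max_new_tokens=8)[0]))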
def generate_id(length=5):
    """
    Generates a random alphanumeric ID.

    Args:
        length (int): The length of the ID.

    Returns:
        str: A random alphanumeric ID.
    """
    return ''.join(random.choices(string.ascii_letters + string.digits, k=length))

def generate_thread_id():
    """
    Generates a random thread ID for a new conversation.

    Note: at the default length of 5 characters, IDs are random but not
    guaranteed to be unique.

    Returns:
        str: A random thread ID.
    """
    return generate_id()

def generate_message_id():
    """
    Generates a random alphanumeric message ID.

    Returns:
        str: A random alphanumeric message ID.
    """
    return generate_id()
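# Example (illustrative; actual values are random):
#     generate_thread_id()   -> 'aB3x9'
#     generate_message_id()  -> 'Qw7Lp'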
def chat_response(msg_prompt: str) -> dict:
    """
    Generates a response from the model given a prompt.

    Args:
        msg_prompt (str): The user's message prompt.

    Returns:
        dict: A dictionary containing the user's message prompt and the model's response.
    """
    generation_params = {
        "do_sample": True,
        "temperature": 0.7,
        "top_p": 0.95,
        "top_k": 40,
        "max_new_tokens": 512,
        "repetition_penalty": 1.1,
    }
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, **generation_params)
    try:
        prompt_template = f'''<|im_start|>system
{system_message}<|im_end|>
<|im_start|>user
{msg_prompt}<|im_end|>
<|im_start|>assistant
'''
        pipe_output = pipe(prompt_template)[0]['generated_text']
        # The pipeline returns the prompt followed by the generation, so strip
        # the prompt prefix to isolate the assistant's response.
        assistant_response = pipe_output[len(prompt_template):].strip()
        return {"user": msg_prompt, "assistant": assistant_response}
    except Exception as e:
        return {"error": str(e)}
def thread_response(msg_history: str, msg_prompt: str) -> dict:
    """
    Generates a response from the model using the system prompt defined above,
    optionally conditioning on prior conversation history.

    Args:
        msg_history (str): The prior conversation history, if any.
        msg_prompt (str): The user's message prompt.

    Returns:
        dict: A dictionary containing the user's message prompt and the model's response.
    """
    # Validate input parameters
    if not msg_prompt:
        raise ValueError("Message prompt cannot be empty.")
    generation_params = {
        "do_sample": True,
        "temperature": 0.7,
        "top_p": 0.95,
        "top_k": 40,
        "max_new_tokens": 512,
        "repetition_penalty": 1.1,
    }
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, **generation_params)
    instructions = f''' In case you are asked about something discussed on previous days or at earlier times, use this as a reference to recall it: {msg_history}. Do not bring up everything, just the major key points. If nothing is mentioned about the past conversation, ignore this. '''
    if msg_history:
        prompt_template = f'''
<|im_start|>system{system_message}{instructions}<|im_end|>
<|im_start|>user {msg_prompt}<|im_end|>
<|im_start|>assistant
'''
    else:
        prompt_template = f'''
<|im_start|>system{system_message}<|im_end|>
<|im_start|>user {msg_prompt}<|im_end|>
<|im_start|>assistant
'''
    try:
        pipe_output = pipe(prompt_template)[0]['generated_text']
        # Strip the prompt prefix to isolate the assistant's response.
        assistant_response = pipe_output[len(prompt_template):].strip()
        return {"user": msg_prompt, "assistant": assistant_response}
    except Exception as e:
        return {"error": str(e)}
def chat_thread_response(ai_persona: str, msg_history: str, msg_prompt: str) -> dict:
    """
    Generates a response based on the provided persona description and user message prompt.

    Args:
        ai_persona (str): The persona description used as the system prompt.
        msg_history (str): The prior conversation history, if any.
        msg_prompt (str): The user's message prompt.

    Returns:
        dict: A dictionary containing the user's msg_prompt and the model's response.
    """
    # Validate input parameters
    if not msg_prompt:
        raise ValueError("Message prompt cannot be empty.")
    generation_params = {
        "do_sample": True,
        "temperature": 0.7,
        "top_p": 0.95,
        "top_k": 40,
        "max_new_tokens": 512,
        "repetition_penalty": 1.1,
    }
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, **generation_params)
    instructions = f''' In case you are asked about something discussed on previous days or at earlier times, use this as a reference to recall it: {msg_history}. Do not bring up everything, just the major key points. If nothing is mentioned about the past conversation, ignore this. '''
    if msg_history:
        prompt_template = f'''
<|im_start|>system{ai_persona}{instructions}<|im_end|>
<|im_start|>user {msg_prompt}<|im_end|>
<|im_start|>assistant
'''
    else:
        prompt_template = f'''
<|im_start|>system{ai_persona}<|im_end|>
<|im_start|>user {msg_prompt}<|im_end|>
<|im_start|>assistant
'''
    try:
        # Generate response using the pipeline
        pipe_output = pipe(prompt_template)[0]['generated_text']
        # Strip the prompt prefix to isolate the assistant's response.
        assistant_response = pipe_output[len(prompt_template):].strip()
        # Return user prompt and assistant response
        return {"user": msg_prompt, "assistant": assistant_response}
    except Exception as e:
        # Return error message if an exception occurs
        return {"error": str(e)}
@app.get("/", tags=["Home"])
async def api_home():
"""
Home endpoint of the API.
Returns:
HTMLResponse: An HTML welcome message.
"""
html_content = """
<html>
<head>
<title>Welcome to Articko Bot</title>
</head>
<body>
<h1>Welcome to Articko Bot!</h1>
</body>
</html>
"""
return HTMLResponse(content=html_content, status_code=200)
@app.post('/thread/{user_id}')
async def thread(request: Request, user_id: str):
    """
    Starts a new conversation thread with a provided prompt for a specific user.

    Args:
        request (Request): The HTTP request object containing the user prompt.
        user_id (str): The unique identifier for the user.

    Returns:
        dict: The response generated by the model along with the user's conversation history.
    """
    try:
        thread_id = generate_thread_id()
        data = await request.json()
        msg_prompt = data.get('msg_prompt')
        msg_history = data.get('msg_history')
        if not msg_prompt:
            raise HTTPException(status_code=400, detail="Prompt not provided")
        # Generate response
        response = thread_response(msg_history, msg_prompt)
        # Generate message ID
        message_id = generate_message_id()
        # Construct conversation entry
        conversation_thread = {'thread_id': thread_id, 'message_id': message_id, 'message': response}
        # Record the conversation in the in-memory store (nothing is persisted to disk)
        conversations.setdefault(user_id, []).append(conversation_thread)
        thread_history = {"user_id": user_id, "thread": conversation_thread}
        # Return response and thread ID
        return {'response': thread_history}
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
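# Example request (assuming the server runs on localhost:8000):
#     curl -X POST http://localhost:8000/thread/user123 \
#          -H "Content-Type: application/json" \
#          -d '{"msg_prompt": "Hi there", "msg_history": ""}'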
@app.post('/chat_thread/{user_id}')
async def chat_thread(request: Request, user_id: str):
    """
    Starts a new chat thread with a provided user message prompt and a persona
    description for the AI assistant.

    Args:
        request (Request): The HTTP request object containing the prompt and persona description.
        user_id (str): The unique identifier for the user.

    Returns:
        dict: The thread ID and the response generated by the model.
    """
    try:
        thread_id = generate_thread_id()
        data = await request.json()
        ai_persona = data.get('ai_persona')
        msg_prompt = data.get('msg_prompt')
        msg_history = data.get('msg_history')
        if not msg_prompt:
            raise HTTPException(status_code=400, detail="Prompt not provided")
        # Generate response
        response = chat_thread_response(ai_persona, msg_history, msg_prompt)
        # Generate message ID
        message_id = generate_message_id()
        # Construct conversation entry
        conversation_thread = {'thread_id': thread_id, 'message_id': message_id, 'message': response}
        # Record the conversation in the in-memory store (nothing is persisted to disk)
        conversations.setdefault(user_id, []).append(conversation_thread)
        thread_history = {"user_id": user_id, "thread": conversation_thread}
        # Return response and thread ID
        return {'response': thread_history}
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
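# Example request for this endpoint (assuming localhost:8000):
#     curl -X POST http://localhost:8000/chat_thread/user123 \
#          -H "Content-Type: application/json" \
#          -d '{"ai_persona": "You are a helpful assistant.", "msg_prompt": "Hello", "msg_history": ""}'

# Entrypoint sketch: uvicorn is imported above but never invoked; the host and
# port below are assumptions, adjust for your deployment.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)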