Update app.py
app.py CHANGED
@@ -28,7 +28,7 @@ def mistral_model():
         tuple: A tuple containing the loaded model and tokenizer.
     """
     model_id = "/code/mistral/"
-    model = AutoModelForCausalLM.from_pretrained("/home/user/app/mistral/")
+    model = AutoModelForCausalLM.from_pretrained("/home/user/app/mistral/",device_map="auto")
     tokenizer = AutoTokenizer.from_pretrained("/home/user/app/mistral/")
 
     return model,tokenizer
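For context on the first hunk: device_map="auto" asks transformers (via the accelerate package) to place the model weights across the available devices automatically instead of loading everything onto one device. A minimal sketch of the resulting loading pattern, using the same local path as the diff:

    # Minimal sketch of the loading pattern after this change.
    # device_map="auto" requires the accelerate package; it spreads the
    # weights across available GPUs, spilling to CPU if needed.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_path = "/home/user/app/mistral/"  # local path used in the diff
    model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto")
    tokenizer = AutoTokenizer.from_pretrained(model_path)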
@@ -58,7 +58,7 @@ def load_model_norm():
 def mistral_generated_response(msg_prompt, persona_desc_prompt):
     user_prompt = f'{msg_prompt} [/INST]'
     persona_prompt = f'{persona_desc_prompt} [/INST]'
-    prompt_template = f'''
+    prompt_template = f'''[INST] Instruction:{persona_prompt} [INST] {user_prompt}'''
 
     encodeds = tokenizer.apply_chat_template(prompt_template, return_tensors="pt")
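A note on the second hunk: the new prompt_template is a plain f-string, but tokenizer.apply_chat_template in transformers expects a list of message dicts (role/content pairs) rather than a raw string. A sketch of the conventional call, with illustrative message content standing in for the prompts built above:

    # Sketch of the conventional apply_chat_template usage (the message
    # content here is illustrative, not taken from the diff).
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("/home/user/app/mistral/")
    messages = [
        {"role": "user", "content": "Hello, who are you?"},
    ]
    # For Mistral-style chat templates this wraps the user turn in
    # [INST] ... [/INST] and returns token IDs ready for model.generate().
    encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt")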