Vitrous committed (verified)
Commit c9cc0e9 · 1 Parent(s): fe8bf67

Update app.py

Files changed (1)
  1. app.py +3 -1
app.py CHANGED
@@ -67,8 +67,10 @@ def generate_response(prompt: str) -> dict:
     input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
     output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512)
     generated_response = (tokenizer.decode(output[0]))
+    # Extract the assistant's reply
+    assistant_reply = generated_response.split('\n')[1]
 
-    return {"user": prompt, "assistant": generated_response}
+    return {"user": prompt, "assistant": assistant_reply}
 
 
 def generate_prompt_response(persona_prompt: str, prompt: str) -> dict:
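
For context, a minimal sketch of how generate_response reads after this commit. It assumes tokenizer, model, and prompt_template are defined elsewhere in app.py (their construction is outside this hunk), and that the decoded output places the assistant's reply on its second line, which is what the split('\n')[1] indexing relies on.

def generate_response(prompt: str) -> dict:
    # `tokenizer`, `model`, and `prompt_template` are assumed to be module-level
    # objects created elsewhere in app.py; they are not defined in this hunk.
    input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()

    # Sample up to 512 new tokens with the same decoding settings as the diff above.
    output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True,
                            top_p=0.95, top_k=40, max_new_tokens=512)

    # Decode the full sequence (prompt plus completion) back into text.
    generated_response = tokenizer.decode(output[0])

    # Extract the assistant's reply: this assumes the reply sits on the second
    # line of the decoded text.
    assistant_reply = generated_response.split('\n')[1]

    return {"user": prompt, "assistant": assistant_reply}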