Zenithwang committed (verified)
Commit bf14f3d · 1 parent: 94281a8

Update app.py

Files changed (1): app.py (+5 −4)
app.py CHANGED
@@ -53,13 +53,14 @@ def predict(message, history):
     print(f'history: {history}')
     for i, item in enumerate(history):
         model_messages.append({"role": user_role, "content": item[0]})
-        if i < len(history) - 1:
-            model_messages.append({"role": assistant_role, "content": item[1]})
+        model_messages.append({"role": assistant_role, "content": item[1]})
 
+    model_messages.append({"role": user_role, "content": message})
+
     print(f'model_messages: {model_messages}')
 
+    print(f'model_final_inputs: {tokenizer.apply_chat_template(model_messages, add_generation_prompt=True, tokenize=False)}, flash=True')
     model_inputs = tokenizer.apply_chat_template(model_messages, add_generation_prompt=True, return_tensors="pt").to(device)
-    print(f'model_final_inputs: {tokenizer.apply_chat_template(model_messages, add_generation_prompt=True, tokenize=False)}')
     # model_inputs = tokenizer([messages], return_tensors="pt").to(device)
 
     streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
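
In effect, the commit changes how the Gradio-style chat history is flattened into model_messages: every (user, assistant) pair from history now contributes both turns (the old "if i < len(history) - 1" guard dropped the final assistant reply), and the incoming message is appended explicitly as the last user turn before the chat template is applied. Below is a minimal standalone sketch of the post-commit flow, assuming the usual TextIteratorStreamer pattern. The model name, the build_model_messages helper, max_new_tokens, and the background generation thread are illustrative assumptions; the diff itself only shows the prompt assembly and streamer setup, and user_role, assistant_role, tokenizer, model, and device are defined elsewhere in app.py.

from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Placeholder values: app.py defines these elsewhere; the model name here
# is purely illustrative, not taken from the commit.
model_name = "some-org/some-chat-model"  # assumption
user_role, assistant_role = "user", "assistant"
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)


def build_model_messages(message, history):
    """Mirror the post-commit assembly: keep every (user, assistant) pair
    from history, then append the current user message as the final turn."""
    model_messages = []
    for user_text, assistant_text in history:
        model_messages.append({"role": user_role, "content": user_text})
        # Pre-commit, the last assistant turn was skipped via
        # "if i < len(history) - 1"; the commit keeps every pair.
        model_messages.append({"role": assistant_role, "content": assistant_text})
    # New in this commit: the incoming message becomes the last user turn.
    model_messages.append({"role": user_role, "content": message})
    return model_messages


def predict(message, history):
    model_messages = build_model_messages(message, history)
    # Render the conversation with the model's chat template and move the
    # resulting token ids to the target device, as in the committed code.
    model_inputs = tokenizer.apply_chat_template(
        model_messages, add_generation_prompt=True, return_tensors="pt"
    ).to(device)
    streamer = TextIteratorStreamer(
        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
    )
    # Run generation in a background thread so tokens can be yielded to the
    # Gradio UI as they arrive (max_new_tokens is an assumed setting).
    thread = Thread(
        target=model.generate,
        kwargs=dict(input_ids=model_inputs, streamer=streamer, max_new_tokens=512),
    )
    thread.start()
    partial = ""
    for token_text in streamer:
        partial += token_text
        yield partial

With this assembly, a two-turn history plus a new message yields the alternating user/assistant/user/assistant/user sequence that apply_chat_template with add_generation_prompt=True expects, whereas the pre-commit code produced a history ending in a user turn with its assistant reply missing.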