gba16326553 committed on
Commit
69105c3
1 Parent(s): cf380d2

Update app.py

Files changed (1)
  1. app.py +3 -2
app.py CHANGED
@@ -19,13 +19,14 @@ def predict(input, history=[]):
     new_user_input_ids = tokenizer.encode(
         input + tokenizer.eos_token, return_tensors="pt"
     )
-    attentionMask = torch.ones(new_user_input_ids.shape, dtype=torch.long)
+    #attentionMask = torch.ones(new_user_input_ids.shape, dtype=torch.long)
+
     # append the new user input tokens to the chat history
     bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
 
     # generate a response
     history = model.generate(
-        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id, attention_mask = attentionMask
+        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
     ).tolist()
 
     # convert the tokens to text, and then split the responses into lines
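
For context, the hunk above sits inside a DialoGPT-style predict() chat function. Below is a minimal sketch of how the updated model.generate(...) call is typically used in such a loop; the checkpoint name, the model/tokenizer setup, and the decoding step after the hunk are assumptions following the common Gradio + DialoGPT pattern, not code taken from this repository's app.py.

# Minimal sketch around the updated generate() call.
# The checkpoint below and everything outside the diff hunk are assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")  # assumed checkpoint
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

def predict(input, history=[]):
    # encode the new user input, appending the end-of-sequence token
    new_user_input_ids = tokenizer.encode(
        input + tokenizer.eos_token, return_tensors="pt"
    )

    # append the new user input tokens to the chat history
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    # generate a response; no explicit attention_mask is passed, so generate()
    # falls back to its default masking behaviour
    history = model.generate(
        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
    ).tolist()

    # convert the tokens to text, then split the conversation into turns
    # (this post-processing step is assumed; it is not part of the hunk)
    response = tokenizer.decode(history[0]).split(tokenizer.eos_token)
    return response, history

On the change itself: the removed attentionMask was built from new_user_input_ids.shape, but generate() is called on the longer bot_input_ids once history is non-empty, so the mask's length would not match the actual input. Dropping the argument and letting generate() infer the mask avoids that mismatch.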