teaevo committed on
Commit
501ede0
·
1 Parent(s): da794fe

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -59,16 +59,16 @@ def chatbot_response(user_message, history=[]):
59
  response = chatbot_tokenizer.decode(outputs[0], skip_special_tokens=True)
60
  '''
61
  # tokenize the new input sentence
62
- new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
63
 
64
  # append the new user input tokens to the chat history
65
  bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
66
 
67
  # generate a response
68
- history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
69
 
70
  # convert the tokens to text, and then split the responses into the right format
71
- response = tokenizer.decode(history[0]).split("<|endoftext|>")
72
  response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)] # convert to tuples of list
73
 
74
  return response, history
 
59
  response = chatbot_tokenizer.decode(outputs[0], skip_special_tokens=True)
60
  '''
61
  # tokenize the new input sentence
62
+ new_user_input_ids = chatbot_tokenizer.encode(input + chatbot_tokenizer.eos_token, return_tensors='pt')
63
 
64
  # append the new user input tokens to the chat history
65
  bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
66
 
67
  # generate a response
68
+ history = chatbot_model.generate(bot_input_ids, max_length=1000, pad_token_id=chatbot_tokenizer.eos_token_id).tolist()
69
 
70
  # convert the tokens to text, and then split the responses into the right format
71
+ response = chatbot_tokenizer.decode(history[0]).split("<|endoftext|>")
72
  response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)] # convert to tuples of list
73
 
74
  return response, history