teaevo committed
Commit 0deb7d9 · 1 Parent(s): cee9f1f

Update app.py

Files changed (1)
  1. app.py +24 -25
app.py CHANGED
@@ -42,32 +42,31 @@ def predict(input, history=[]):
     # Check if the user input is a question
     is_question = "?" in input

-    # tokenize the new input sentence
-    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
-
-    # Convert history tensor to a list
-    history_list = history.tolist() if isinstance(history, torch.Tensor) else history
-
-    # append the new user input tokens to the chat history
-    bot_input_ids = torch.cat([torch.LongTensor(history_list), new_user_input_ids], dim=-1)
-
-    # generate a response
-    history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
-    response_dialog = tokenizer.decode(history[0])
-
-    # Use the SQL model to generate a response
-    encoding = sql_tokenizer(table=table, query=response_dialog, return_tensors="pt")
-    outputs = sql_model.generate(**encoding)
-    response_sql = sql_tokenizer.batch_decode(outputs, skip_special_tokens=True)
-
-    # Add the SQL model's response to the chat history
-    history.extend(response_sql)
-
-    # convert the tokens to text, and then split the responses into the right format
-    response = tokenizer.decode(history[0]).split("<|endoftext|>")
-    response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]  # convert to tuples of list
+    if is_question:
+        sql_encoding = sql_tokenizer(table=table, query=input + sql_tokenizer.eos_token, return_tensors="pt")
+        sql_outputs = sql_model.generate(**sql_encoding)
+        response = sql_tokenizer.batch_decode(sql_outputs, skip_special_tokens=True)
+
+        '''
+        bot_input_ids = torch.cat([torch.LongTensor(history), sql_encoding], dim=-1)
+        history = sql_model.generate(bot_input_ids, max_length=1000, pad_token_id=sql_tokenizer.eos_token_id).tolist()
+        response = sql_tokenizer.decode(history[0]).split("<|endoftext|>")
+        response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]
+        '''
+    else:
+        # tokenize the new input sentence
+        new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
+
+        # append the new user input tokens to the chat history
+        bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
+
+        # generate a response
+        history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
+
+        # convert the tokens to text, then split the responses into the right format
+        response = tokenizer.decode(history[0]).split("<|endoftext|>")
+        response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]  # convert to list of tuples

     return response, history
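
For context, a minimal self-contained sketch of the predict flow after this commit: inputs containing "?" are routed to the table question-answering model, everything else to the conversational model. The checkpoint names (microsoft/DialoGPT-medium, microsoft/tapex-base-finetuned-wtq) and the sample table are illustrative assumptions only; the commit does not show how app.py loads its models or builds table.

import pandas as pd
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BartForConditionalGeneration,
    TapexTokenizer,
)

# Assumed checkpoints -- app.py may use different ones.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
sql_tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base-finetuned-wtq")
sql_model = BartForConditionalGeneration.from_pretrained("microsoft/tapex-base-finetuned-wtq")

# Placeholder table; the real app builds this elsewhere.
table = pd.DataFrame({"year": ["2008", "2012"], "city": ["beijing", "london"]})

def predict(user_input, history=None):
    history = history or []
    if "?" in user_input:
        # Table-QA branch: answer the question directly from the table.
        # (The commit appends sql_tokenizer.eos_token to the query; the TAPEX
        # examples in the transformers docs pass the raw query, as done here.)
        encoding = sql_tokenizer(table=table, query=user_input, return_tensors="pt")
        outputs = sql_model.generate(**encoding)
        response = sql_tokenizer.batch_decode(outputs, skip_special_tokens=True)
        return response, history

    # Chat branch: extend the running token history and generate a reply.
    new_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    bot_input_ids = (
        torch.cat([torch.LongTensor(history), new_ids], dim=-1) if history else new_ids
    )
    history = model.generate(
        bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id
    ).tolist()
    turns = tokenizer.decode(history[0]).split("<|endoftext|>")
    # Pair up (user, bot) turns for a Gradio-style chatbot display.
    response = [(turns[i], turns[i + 1]) for i in range(0, len(turns) - 1, 2)]
    return response, history

# Example usage:
#   predict("which city hosted in 2012?")  -> table-QA branch
#   predict("hello there")                 -> chat branch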