# BAAI/AquilaSQL-7B / chat_test.py
# To run this code, install the transformers fork below:
# https://github.com/shunxing1234/transformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
device = torch.device("cuda")
model_info = "BAAI/AquilaSQL-7B"
tokenizer = AutoTokenizer.from_pretrained(model_info, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_info, trust_remote_code=True, torch_dtype=torch.float16, device_map='auto')
model.eval()
# device_map='auto' already dispatches the weights across available devices,
# so no explicit model.to(device) is needed here.
torch.manual_seed(123)
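# The prompt below is in Chinese; in English it reads roughly: "There are several
# database tables: cars_data(horsepower, accelerate, mpg, id, year),
# continents(contid, continent), countries(continent, countryname, countryid),
# model_list(model, maker, modelid), related by countries.continent = continents.contid.
# Write a SQL query for the question: how many cars have a greater acceleration
# than the car with the most horsepower?"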
text = "有多个数据库表,信息如下:\n表名为cars_data,包含的属性为cars_data.horsepower,cars_data.accelerate,cars_data.mpg,cars_data.id,cars_data.year;表名为continents,包含的属性为continents.contid,continents.continent;表名为countries,包含的属性为countries.continent,countries.countryname,countries.countryid;表名为model_list,包含的属性为model_list.model,model_list.maker,model_list.modelid,它们之间的关系为 countries.continent = continents.contid\n请为下面的问题编写sql查询语句:\n加速度比马力最大的汽车更大的汽车有多少辆? "
def generate_prompt(query: str):
    # Wrap the question in the Aquila chat template. The parameter is named
    # `query` to avoid shadowing the built-in input().
    prompt = f"A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.###Human: {query}###Assistant:"
    return prompt
# Strings the model should not emit; used to build bad_words_ids below.
stop_tokens = ["###", "[UNK]", "</s>", "<|endoftext|>"]
with torch.no_grad():
    _input = generate_prompt(text)
    tokens = tokenizer.encode_plus(_input, None, max_length=None)['input_ids']
    tokens = torch.tensor(tokens)[None,].to(device)
    # bad_words_ids takes one inner list per banned token sequence, so each
    # stop token gets its own single-id list; wrapping all ids in one list
    # would only ban that exact multi-token sequence.
    bad_words = [[tokenizer.encode(token)[0]] for token in stop_tokens]
    # max_new_tokens takes precedence over max_length, so only one is needed.
    out = model.generate(tokens, do_sample=False, max_new_tokens=512,
                         eos_token_id=100007, bad_words_ids=bad_words)[0]
    out = tokenizer.decode(out.cpu().tolist())
    print(out)
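
# A minimal post-processing sketch, assuming the "###Assistant:" marker from
# generate_prompt and the stop_tokens defined above: strip the chat template
# so only the generated SQL remains.
sql = out.split("###Assistant:")[-1]
for token in stop_tokens:
    sql = sql.split(token)[0]
print(sql.strip())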