CuteGPT is an open-source conversational language model that supports both Chinese and English, developed by the [Fudan University KnowledgeWorks Laboratory](http://kw.fudan.edu.cn/). It is based on the original Llama architecture and has 13B (13 billion) parameters, allowing int8-precision inference on a single RTX 3090 graphics card. CuteGPT expands the Chinese vocabulary and continues pre-training the Llama model to improve its understanding of Chinese, and is then fine-tuned on conversational instructions to strengthen its instruction-following ability.

Compared with the KW-CuteGPT-7b version, KW-CuteGPT-13b improves factual accuracy, comprehension of complex instructions, long-text understanding, reasoning, and faithful question answering. On several evaluation tasks, KW-CuteGPT-13b currently outperforms the majority of models of similar scale.

```python
from transformers import LlamaForCausalLM, LlamaTokenizer
from peft import PeftModel
import torch
```
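
The snippets below assume the usual Hugging Face stack is installed. The package list here is our assumption and is not pinned by the original release:

```python
# Assumed prerequisites (our assumption; versions not pinned by this README):
#   pip install torch transformers peft accelerate sentencepiece
#   pip install bitsandbytes  # only needed for the 8-bit loading variant below
```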

* The prompt template for inference

```python
# System prompt (in Chinese): "You are CuteGPT, a language model trained by the
# KnowledgeWorks Laboratory of Fudan University. Given a task description,
# please answer the corresponding request."
overall_instruction = "你是复旦大学知识工场实验室训练出来的语言模型CuteGPT。给定任务描述,请给出对应请求的回答。\n"

def generate_prompt(query, history, input=None):
    """Assemble the conversation history and the new query into a single prompt."""
    prompt = overall_instruction
    for i, (old_query, response) in enumerate(history):
        prompt += "Q: {}\nA: {}\n".format(old_query, response)
    prompt += "Q: {}\nA: ".format(query)
    return prompt
```
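
For illustration, this is the prompt that `generate_prompt` assembles after one completed turn (the queries and answer below are made-up placeholders):

```python
history = [("Who are you?", "I am CuteGPT.")]
print(generate_prompt("What can you do?", history))
# 你是复旦大学知识工场实验室训练出来的语言模型CuteGPT。给定任务描述,请给出对应请求的回答。
# Q: Who are you?
# A: I am CuteGPT.
# Q: What can you do?
# A:
```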

* Load the model and tokenizer; here we use the LoRA version of the checkpoint
* Without 8-bit quantization

```python
model_name = "XuYipei/kw-cutegpt-13b-base"
LORA_WEIGHTS = "Abbey4799/kw-cutegpt-13b-ift-lora"
tokenizer = LlamaTokenizer.from_pretrained(LORA_WEIGHTS)
# Load the base model in fp16 and let accelerate place it on available GPUs.
model = LlamaForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
)
model.eval()
# Attach the instruction-tuned LoRA adapter and keep everything in fp16.
model = PeftModel.from_pretrained(model, LORA_WEIGHTS).to(torch.float16)
device = torch.device("cuda")
```
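
Optionally, if you will not swap adapters at runtime, the LoRA weights can be folded into the base model to remove the adapter overhead during inference. This is a sketch of ours, assuming a `peft` version recent enough to provide `merge_and_unload()`; it does not apply to the 8-bit-quantized variant below:

```python
# Optional (our addition): merge the LoRA weights into the base model for
# slightly faster inference. Requires peft's merge_and_unload().
model = model.merge_and_unload()
model.eval()
```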

* With 8-bit quantization (model quality degrades somewhat after quantization)

```python
model_name = "XuYipei/kw-cutegpt-13b-base"
LORA_WEIGHTS = "Abbey4799/kw-cutegpt-13b-ift-lora"
tokenizer = LlamaTokenizer.from_pretrained(LORA_WEIGHTS)
# Load the base model with 8-bit weights (requires bitsandbytes).
model = LlamaForCausalLM.from_pretrained(
    model_name,
    load_in_8bit=True,
    torch_dtype=torch.float16,
    device_map="auto",
)
model.eval()
model = PeftModel.from_pretrained(model, LORA_WEIGHTS)
device = torch.device("cuda")
```
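
The introduction mentions that the 13B model fits on a single RTX 3090 (24 GB) at int8 precision. As a quick sanity check of our own, you can inspect how much GPU memory is actually allocated after loading:

```python
# Rough check of GPU memory use after loading (CUDA only; our addition).
print(f"{torch.cuda.memory_allocated() / 1024**3:.1f} GiB allocated")
```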

* Inference

```python
history = []
# Example queries (in Chinese): "Please recommend five famous literary works,
# listing the title and author of each" / "How about three more?"
queries = ['请推荐五本名著,依次列出作品名、作者', '再来三本呢?']
memory_limit = 3  # the number of (query, response) pairs to remember
for query in queries:
    prompt = generate_prompt(query, history)
    print(prompt)

    input_ids = tokenizer(prompt, return_tensors="pt", padding=False, truncation=False, add_special_tokens=False)
    input_ids = input_ids["input_ids"].to(device)

    with torch.no_grad():
        outputs = model.generate(
            input_ids=input_ids,
            # Note: in recent transformers versions, top_p/top_k only take
            # effect when sampling is enabled (do_sample=True).
            top_p=0.8,
            top_k=50,
            repetition_penalty=1.1,
            max_new_tokens=256,
            early_stopping=True,
            # CuteGPT ends its turns with '<s>', so it serves as the EOS token here.
            eos_token_id=tokenizer.convert_tokens_to_ids('<s>'),
            pad_token_id=tokenizer.eos_token_id,
            min_length=input_ids.shape[1] + 1
        )
    s = outputs[0][input_ids.shape[1]:]
    response = tokenizer.decode(s)
    # Strip the model's special end-of-turn markers from the decoded text.
    response = response.replace('<s>', '').replace('<end>', '').replace('</s>', '')
    print(response)
    history.append((query, response))
    history = history[-memory_limit:]
```
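
To avoid repeating the boilerplate above, the generation loop can be wrapped in a small helper. This is our own sketch, not part of the original release; it reuses the `model`, `tokenizer`, `device`, and `generate_prompt` defined in the snippets above:

```python
def chat(query, history, memory_limit=3):
    """One conversational turn: build the prompt, generate, clean the reply."""
    prompt = generate_prompt(query, history)
    inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False)
    input_ids = inputs["input_ids"].to(device)
    with torch.no_grad():
        outputs = model.generate(
            input_ids=input_ids,
            top_p=0.8,
            top_k=50,
            repetition_penalty=1.1,
            max_new_tokens=256,
            early_stopping=True,
            eos_token_id=tokenizer.convert_tokens_to_ids('<s>'),
            pad_token_id=tokenizer.eos_token_id,
            min_length=input_ids.shape[1] + 1,
        )
    response = tokenizer.decode(outputs[0][input_ids.shape[1]:])
    response = response.replace('<s>', '').replace('<end>', '').replace('</s>', '')
    # Keep only the most recent turns, mirroring memory_limit above.
    history = (history + [(query, response)])[-memory_limit:]
    return response, history

# Example usage (query text is a placeholder):
history = []
reply, history = chat("请推荐五本名著,依次列出作品名、作者", history)
print(reply)
```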