GeneZC committed
Commit 93935eb
Parent: 39a391e

Upload 6 files

config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "MiniChat-1.5-DPO-3B",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 3072,
+   "initializer_range": 0.02,
+   "intermediate_size": 8192,
+   "max_position_embeddings": 4096,
+   "model_type": "llama",
+   "num_attention_heads": 24,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 24,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.33.2",
+   "use_cache": true,
+   "vocab_size": 49216
+ }
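
For reference, a minimal sketch of loading this checkpoint with Transformers, assuming the six files from this commit sit together in one local directory; the path ./MiniChat-1.5-DPO-3B below is an assumption, so substitute the actual repo id or directory.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed local directory holding config.json, pytorch_model.bin, and the tokenizer files.
path = "./MiniChat-1.5-DPO-3B"

tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.float16)

# config.json above declares a 24-layer Llama variant: hidden_size 3072, 24 attention heads,
# a 4096-token context window, and a 49216-entry vocabulary.
print(model.config.num_hidden_layers, model.config.hidden_size, model.config.vocab_size)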
conversation.py ADDED
@@ -0,0 +1,223 @@
+ """
+ Conversation prompt templates.
+ """
+
+ import dataclasses
+ from enum import auto, Enum
+ from typing import List, Tuple, Any
+
+
+ class SeparatorStyle(Enum):
+     """Different separator styles."""
+
+     ADD_COLON_SINGLE = auto()
+     ADD_COLON_TWO = auto()
+     NO_COLON_SINGLE = auto()
+     BAIZE = auto()
+     PHOENIX = auto()
+     MINICHAT = auto()
+
+
+ @dataclasses.dataclass
+ class Conversation:
+     """A class that keeps all conversation history."""
+
+     # System prompt
+     system: str
+     # Two roles
+     roles: List[str]
+     # All messages
+     messages: List[List[str]]
+     # Offset of few-shot examples
+     offset: int
+     # Separators
+     sep_style: SeparatorStyle
+     sep: str
+     sep2: str = None
+     # Stop criteria (the default is the EOS token)
+     stop_str: str = None
+     # Stops generation if meeting any token in this list
+     stop_token_ids: List[int] = None
+
+     # Used for the state in the gradio servers.
+     # TODO(lmzheng): refactor this
+     conv_id: Any = None
+     skip_next: bool = False
+     model_name: str = None
+
+     def get_prompt(self):
+         if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
+             ret = self.system + self.sep
+             for role, message in self.messages:
+                 if message:
+                     ret += role + ": " + message + self.sep
+                 else:
+                     ret += role + ": "
+             return ret
+         elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
+             seps = [self.sep, self.sep2]
+             ret = self.system + seps[0]
+             for i, (role, message) in enumerate(self.messages):
+                 if message:
+                     ret += role + ": " + message + seps[i % 2]
+                 else:
+                     ret += role + ": "
+             return ret
+         elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
+             ret = self.system
+             for role, message in self.messages:
+                 if message:
+                     ret += role + message + self.sep
+                 else:
+                     ret += role
+             return ret
+         elif self.sep_style == SeparatorStyle.BAIZE:
+             ret = self.system + "\n"
+             for role, message in self.messages:
+                 if message:
+                     ret += role + message + "\n"
+                 else:
+                     ret += role
+             return ret
+         elif self.sep_style == SeparatorStyle.PHOENIX:
+             ret = self.system
+             for role, message in self.messages:
+                 if message:
+                     ret += role + ": " + "<s>" + message + "</s>"
+                 else:
+                     ret += role + ": " + "<s>"
+             return ret
+         elif self.sep_style == SeparatorStyle.MINICHAT:
+             ret = self.system
+             for role, message in self.messages:
+                 if message:
+                     ret += role + " " + message + "</s>"
+                 else:
+                     ret += role  # No space is needed.
+             return ret
+         else:
+             raise ValueError(f"Invalid style: {self.sep_style}")
+
+     def append_message(self, role, message):
+         self.messages.append([role, message])
+
+     def to_gradio_chatbot(self):
+         ret = []
+         for i, (role, msg) in enumerate(self.messages[self.offset:]):
+             if i % 2 == 0:
+                 ret.append([msg, None])
+             else:
+                 ret[-1][-1] = msg
+         return ret
+
+     def to_openai_api_messages(self):
+         ret = [{"role": "system", "content": self.system}]
+
+         for i, (_, msg) in enumerate(self.messages[self.offset:]):
+             if i % 2 == 0:
+                 ret.append({"role": "user", "content": msg})
+             else:
+                 if msg is not None:
+                     ret.append({"role": "assistant", "content": msg})
+         return ret
+
+     def copy(self):
+         return Conversation(
+             system=self.system,
+             roles=self.roles,
+             messages=[[x, y] for x, y in self.messages],
+             offset=self.offset,
+             sep_style=self.sep_style,
+             sep=self.sep,
+             sep2=self.sep2,
+             stop_str=self.stop_str,
+             stop_token_ids=self.stop_token_ids,
+             conv_id=self.conv_id,
+             model_name=self.model_name,
+         )
+
+     def dict(self):
+         return {
+             "system": self.system,
+             "roles": self.roles,
+             "messages": self.messages,
+             "offset": self.offset,
+             "conv_id": self.conv_id,
+             "model_name": self.model_name,
+         }
+
+
+ conv_vicuna = Conversation(
+     system="A chat between a curious user and an artificial intelligence assistant. "
+     "The assistant gives helpful, detailed, and polite answers to the user's questions.",
+     roles=("USER", "ASSISTANT"),
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.ADD_COLON_TWO,
+     sep=" ",
+     sep2="</s>",
+ )
+
+ conv_baize = Conversation(
+     system="The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n",
+     roles=("[|Human|]", "[|AI|]"),
+     messages=(
+         ("[|Human|]", "Hello!"),
+         ("[|AI|]", "Hi!"),
+     ),
+     offset=2,
+     sep_style=SeparatorStyle.BAIZE,
+     sep="\n",
+     stop_str="[|Human|]",
+ )
+
+ conv_phoenix = Conversation(
+     system="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
+     roles=("Human", "Assistant"),
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.PHOENIX,
+     sep="</s>",
+ )
+
+ conv_chatgpt = Conversation(
+     system="You are a helpful assistant.",
+     roles=("user", "assistant"),
+     messages=(),
+     offset=0,
+     sep_style=None,
+     sep=None,
+ )
+
+ conv_minichat = Conversation(
+     # System prompt in Chinese; it roughly translates to: "'MiniChat' is an AI language model
+     # developed by 'Beccurio'. Below is a conversation between a human and MiniChat. MiniChat's
+     # replies should be as detailed as possible and formatted in Markdown. MiniChat should
+     # refuse to take part in unethical discussions."
+     system="‘MiniChat’是一个由‘Beccurio’开发的AI语言模型。下面是人类和MiniChat之间的一段对话。MiniChat的回复应当尽可能详细,并且以Markdown的形式输出。MiniChat应当拒绝参与违背伦理的讨论。</s>",
+     roles=("[|User|]", "[|Assistant|]"),
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.MINICHAT,
+     sep="</s>",
+ )
+
+
+ conv_templates = {
+     "vicuna": conv_vicuna,
+     "baize": conv_baize,
+     "phoenix": conv_phoenix,
+     "chatgpt": conv_chatgpt,
+     "minichat": conv_minichat,
+ }
+
+
+ def get_default_conv_template(model_name):
+     model_name = model_name.lower()
+     try:
+         ret = conv_templates[model_name]
+         return ret.copy()
+     except KeyError:
+         raise NotImplementedError(f"No support for model {model_name}.")
+
+
+ if __name__ == "__main__":
+     conv = conv_templates["minichat"].copy()
+     conv.append_message(conv.roles[0], "Write a Python function that checks if a given number is even or odd.")
+     conv.append_message(conv.roles[1], None)
+     print([conv.get_prompt()])
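
A short usage sketch tying conversation.py to the checkpoint: it builds a MiniChat-style prompt with get_default_conv_template and feeds it to the model. The local path and the generation settings are assumptions, not part of this commit.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from conversation import get_default_conv_template

path = "./MiniChat-1.5-DPO-3B"  # assumed local path to this commit's files
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.float16)

conv = get_default_conv_template("minichat")
conv.append_message(conv.roles[0], "Hi!")
conv.append_message(conv.roles[1], None)  # empty assistant slot to be completed by the model
prompt = conv.get_prompt()  # system prompt + "[|User|] Hi!</s>[|Assistant|]"

inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))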
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6af991bdb553810f86f2ac2cbf964bafdc111e1239c63f74ca4e2dabdddafb30
+ size 6040956605
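
pytorch_model.bin is stored as a Git LFS pointer; the actual ~6 GB weight file is fetched separately (for example via git lfs or the Hugging Face client). A small standard-library sketch that checks a downloaded copy against the size and sha256 recorded above; the local filename is an assumption.

import hashlib
import os

path = "pytorch_model.bin"  # assumed local copy of the resolved LFS object
expected_size = 6040956605
expected_sha256 = "6af991bdb553810f86f2ac2cbf964bafdc111e1239c63f74ca4e2dabdddafb30"

assert os.path.getsize(path) == expected_size, "size mismatch"

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)
assert digest.hexdigest() == expected_sha256, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")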
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
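
A quick check, sketched under the assumption that the files load from a local path, that the special tokens declared here line up with the ids in config.json (bos_token_id=1, eos_token_id=2; unk is conventionally id 0 in Llama-style SentencePiece vocabularies).

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./MiniChat-1.5-DPO-3B")  # assumed local path
print(tokenizer.bos_token, tokenizer.convert_tokens_to_ids("<s>"))    # expected: <s> 1
print(tokenizer.eos_token, tokenizer.convert_tokens_to_ids("</s>"))   # expected: </s> 2
print(tokenizer.unk_token, tokenizer.convert_tokens_to_ids("<unk>"))  # expected: <unk> 0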
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae87c0db2b21b0fa3fdc5e19d1f9cea94efb703cc7c6281d8718a6714b3cc2be
+ size 748869
tokenizer_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "legacy": null,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "use_default_system_prompt": true,
+   "use_fast": true
+ }
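
Because the config above sets add_bos_token to true and add_eos_token to false, encoding plain text should prepend <s> but not append </s>. A minimal sketch, with the local path assumed as before.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./MiniChat-1.5-DPO-3B")  # assumed local path

ids = tokenizer("Hello, world!").input_ids
print(ids[0] == tokenizer.bos_token_id)   # True: BOS is prepended
print(ids[-1] == tokenizer.eos_token_id)  # False: EOS is not appended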