Commit 60f5ce0
Parent(s): 7477a53

Add support for Gemma chat template (#1530)

* Add support for Gemma chat template
* Update fschat version to include its newest support for Gemma chat style
* Pin fastchat to current HEAD
---------
Co-authored-by: Wing Lian <wing.lian@gmail.com>
requirements.txt
CHANGED
@@ -28,7 +28,7 @@ scipy
 scikit-learn==1.2.2
 pynvml
 art
-fschat
+fschat @ git+https://github.com/lm-sys/FastChat.git@5095615810cf613dba7f27dd155f571fcff976d8
 gradio==3.50.2
 tensorboard
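The dependency now points at a specific FastChat commit because the monkeypatch below relies on SeparatorStyle.GEMMA, which older fschat releases do not define. A minimal sanity-check sketch (not part of the commit; the error message is illustrative):

# Sketch: confirm the installed fschat exposes the GEMMA separator style
# that the monkeypatched get_turns below depends on.
from fastchat.conversation import SeparatorStyle

if not hasattr(SeparatorStyle, "GEMMA"):
    raise RuntimeError(
        "Installed fschat predates Gemma support; "
        "reinstall from the pinned commit in requirements.txt"
    )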
src/axolotl/monkeypatch/fastchat_conversation_turns.py
CHANGED
@@ -123,6 +123,14 @@ def get_turns(  # pylint: disable=too-many-return-statements
             else:
                 yield role, ""
         return
+    if self.sep_style == SeparatorStyle.GEMMA:
+        if self.system_message:
+            raise ValueError("Gemma chat template does not support system messages")
+        for i, (role, message) in enumerate(self.messages):
+            prefix = "<bos>" if i == 0 else ""
+            message_str = message if message else ""
+            yield prefix + "<start_of_turn>" + role + "\n", message_str + "<end_of_turn>\n"
+        return
     if self.sep_style == SeparatorStyle.CHATGLM:
         # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
         # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926