Fix(tokenizer): Fix condition to add pad token (#477)
* Fix(tokenizer): Fix condition to add pad token
* chore: fix lint
src/axolotl/utils/models.py
@@ -55,10 +55,15 @@ def load_tokenizer(cfg):
         **tokenizer_kwargs,
     )
 
-    if tokenizer.__class__.__name__ in [
-        "LlamaTokenizer",
-        "LlamaTokenizerFast",
-    ]:
+    if (
+        tokenizer.__class__.__name__
+        in [
+            "LlamaTokenizer",
+            "LlamaTokenizerFast",
+        ]
+        and hasattr(tokenizer, "pad_token")
+        and not tokenizer.pad_token
+    ):
         # set a pad_token, but use eos_token so we don't add a new token
         tokenizer.pad_token = LLAMA_DEFAULT_EOS_TOKEN
 
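For reference, a minimal standalone sketch of what the tightened condition does (the model id below is illustrative, and LLAMA_DEFAULT_EOS_TOKEN is assumed to mirror the module-level constant in src/axolotl/utils/models.py): the pad token is only backfilled with the EOS token when a Llama tokenizer does not already have one, so a pad token configured elsewhere is no longer overwritten.

# Standalone sketch of the new guard, outside of axolotl.
from transformers import AutoTokenizer

LLAMA_DEFAULT_EOS_TOKEN = "</s>"  # assumed value of the constant in models.py

# Hypothetical checkpoint used only to obtain a LlamaTokenizer instance.
tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")

if (
    tokenizer.__class__.__name__ in ["LlamaTokenizer", "LlamaTokenizerFast"]
    and hasattr(tokenizer, "pad_token")
    and not tokenizer.pad_token
):
    # Backfill only: a pad_token that is already set stays untouched.
    tokenizer.pad_token = LLAMA_DEFAULT_EOS_TOKEN

print(tokenizer.pad_token)  # EOS token if the checkpoint shipped without a pad token

Reusing the EOS token as the pad token keeps the vocabulary and embedding matrix unchanged, which is what the in-code comment means by not adding a new token.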