fix(yml): update llama-3 config (#1543) [skip ci]
examples/llama-3/lora-8b.yml
@@ -1,6 +1,6 @@
-base_model:
+base_model: meta-llama/Meta-Llama-3-8B
 model_type: LlamaForCausalLM
-tokenizer_type:
+tokenizer_type: AutoTokenizer

 load_in_8bit: true
 load_in_4bit: false
@@ -64,3 +64,4 @@ weight_decay: 0.0
 fsdp:
 fsdp_config:
 special_tokens:
+  pad_token: <|end_of_text|>
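For reference, the head of examples/llama-3/lora-8b.yml reads as follows once the first hunk is applied (a sketch reassembled from the diff; the unchanged keys are copied from the hunk context, and the comments are added here for explanation only):

base_model: meta-llama/Meta-Llama-3-8B   # Hugging Face repo ID of the Llama 3 8B base weights
model_type: LlamaForCausalLM
tokenizer_type: AutoTokenizer            # Llama 3 has no SentencePiece tokenizer, so the class is resolved automatically

load_in_8bit: true                       # 8-bit base weights for the LoRA run
load_in_4bit: false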
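The new special_tokens entry reflects that the Meta-Llama-3-8B tokenizer does not define a pad token, so one has to be set explicitly for padded, batched training; the config reuses the existing <|end_of_text|> token rather than adding a new one. A sketch of the resulting block, with the nesting made explicit:

special_tokens:
  pad_token: <|end_of_text|>   # reuse an existing special token instead of growing the vocabulary

With these defaults filled in, the example should run as-is, e.g. via accelerate launch -m axolotl.cli.train examples/llama-3/lora-8b.yml (assuming the usual axolotl training entry point).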