barneystinson
committed
Commit • 45dfd01
1 Parent(s): 0c7b7c9
Upload training_config.yml with huggingface_hub
training_config.yml CHANGED (+4 -4)
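Per the commit message, the config was pushed with the huggingface_hub Python client. A minimal sketch of that kind of upload, assuming a local copy of the file and a placeholder repo id (neither is taken from this commit):

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="training_config.yml",   # local file to push
    path_in_repo="training_config.yml",      # destination path in the repo
    repo_id="barneystinson/some-model",      # placeholder, not the actual repo
    commit_message="Upload training_config.yml with huggingface_hub",
)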
@@ -19,7 +19,7 @@ checkpointer:
     - meta_model_1.pt
   adapter_checkpoint: null
   recipe_checkpoint: null
-  output_dir: output_checkpoints/
+  output_dir: output_checkpoints/experiment_1
   model_type: LLAMA3
 resume_from_checkpoint: false
 interim_checkpoint_steps: 15000

@@ -50,13 +50,13 @@ batch_size: 6
 optimizer:
   _component_: torch.optim.AdamW
   weight_decay: 0.99
-  lr:
+  lr: 1.0e-05
 lr_scheduler:
   _component_: torchtune.modules.get_cosine_schedule_with_warmup
-  num_warmup_steps:
+  num_warmup_steps: 4
 loss:
   _component_: torch.nn.CrossEntropyLoss
-epochs:
+epochs: 60
 max_steps_per_epoch: null
 gradient_accumulation_steps: 16
 compile: false
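As a quick sanity check on the values filled in by this commit, a minimal sketch that loads the updated config and builds the optimizer and warmup schedule from it. It assumes the nesting shown in the diff above, substitutes a dummy parameter for the real model, and makes up the total step count, which is not part of this config:

import yaml
import torch
from torchtune.modules import get_cosine_schedule_with_warmup

with open("training_config.yml") as f:         # path assumed to be local
    cfg = yaml.safe_load(f)

params = [torch.nn.Parameter(torch.zeros(1))]  # stand-in for real model parameters

optimizer = torch.optim.AdamW(
    params,
    lr=cfg["optimizer"]["lr"],                      # 1.0e-05 after this commit
    weight_decay=cfg["optimizer"]["weight_decay"],  # 0.99
)
scheduler = get_cosine_schedule_with_warmup(
    optimizer,
    num_warmup_steps=cfg["lr_scheduler"]["num_warmup_steps"],  # 4
    num_training_steps=1_000,  # assumed; not specified in training_config.yml
)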