Commit 52c9302
Parent(s): 6f1b2ef
Upload train-config.yaml with huggingface_hub
train-config.yaml  ADDED  (+55 -0)
@@ -0,0 +1,55 @@
dataset_args:
  path: argilla/dpo-mix-7k

format_args:
  prompt_format: zephyr-gemma

model_args:
  pretrained_model_name_or_path: HuggingFaceH4/zephyr-7b-gemma-sft-v0.1
  torch_dtype: bfloat16

wandb_args:
  entity: argilla-io
  project: zephyr-gemma-dpo
  name: 16bit

training_args:
  # DPOTrainer
  beta: 0.05
  loss_type: sigmoid
  max_length: 1024
  max_prompt_length: 512
  # Trainer (train)
  bf16: true
  do_train: true
  gradient_accumulation_steps: 8
  gradient_checkpointing: true
  gradient_checkpointing_kwargs:
    use_reentrant: false
  learning_rate: 5.0e-7
  logging_steps: 10
  lr_scheduler_type: cosine
  num_train_epochs: 2
  optim: adamw_torch
  output_dir: data/gemma-7b-it-dpo
  per_device_train_batch_size: 2
  seed: 42
  warmup_ratio: 0.1
  warmup_steps: 100
  report_to:
    - wandb
    - tensorboard
  # Trainer (eval)
  do_eval: true
  evaluation_strategy: steps
  eval_steps: 100
  per_device_eval_batch_size: 4
  # Trainer (save)
  hub_model_id: alvarobartt/zephyr-gemma-dpo
  hub_private_repo: true
  push_to_hub: true
  save_strategy: "no" # Quoted, otherwise it is cast to `False`
  save_total_limit: null

use_accelerate: true
use_unsloth: false
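The comment on `save_strategy` points at a real YAML 1.1 pitfall: a bare `no` (like `yes`, `on`, and `off`) is a boolean literal, so without the quotes the value would reach the trainer as `False` rather than the string `"no"`. A quick check with PyYAML illustrates this; PyYAML is an assumption here, since the commit does not show which loader the training script uses:

```python
import yaml

# PyYAML implements YAML 1.1, where a bare `no` parses as a boolean.
print(yaml.safe_load("save_strategy: no"))    # {'save_strategy': False}
print(yaml.safe_load('save_strategy: "no"'))  # {'save_strategy': 'no'}
```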
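The `# DPOTrainer` and `# Trainer (...)` comments suggest how `training_args` is meant to be split: `beta`, `loss_type`, `max_length`, and `max_prompt_length` are arguments to TRL's `DPOTrainer`, while the remaining keys map onto `transformers.TrainingArguments`. Below is a minimal sketch of a script consuming the file along those lines; the actual training script is not part of this commit, so the loading code, the wandb setup via environment variables, and the omitted zephyr-gemma prompt formatting are all assumptions:

```python
import os

import yaml
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

with open("train-config.yaml") as f:
    config = yaml.safe_load(f)

# wandb_args map naturally onto wandb's environment variables.
os.environ["WANDB_ENTITY"] = config["wandb_args"]["entity"]
os.environ["WANDB_PROJECT"] = config["wandb_args"]["project"]
os.environ["WANDB_NAME"] = config["wandb_args"]["name"]

train_kwargs = dict(config["training_args"])
# Keys under the `# DPOTrainer` comment are DPOTrainer arguments,
# not TrainingArguments fields.
dpo_kwargs = {k: train_kwargs.pop(k)
              for k in ("beta", "loss_type", "max_length", "max_prompt_length")}

model_name = config["model_args"]["pretrained_model_name_or_path"]
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=config["model_args"]["torch_dtype"]
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# `format_args.prompt_format: zephyr-gemma` implies a formatting step that
# renders the dataset into prompt/chosen/rejected columns with the
# zephyr-gemma chat template; omitted here for brevity.
dataset = load_dataset(config["dataset_args"]["path"])

trainer = DPOTrainer(
    model,  # with no ref_model passed, DPOTrainer derives the reference model
    args=TrainingArguments(**train_kwargs),
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,
    **dpo_kwargs,
)
trainer.train()
```

The top-level `use_accelerate: true` presumably gates launching through `accelerate launch`, and `use_unsloth: false` disables an Unsloth fast path; both look like flags read by the surrounding framework rather than by `DPOTrainer` itself.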