Delta-Vector committed
Commit cbb8d17 · verified · 1 parent: 6f50300

Upload 2 files

Erebus_Gemma_Control_Completion.yaml ADDED
@@ -0,0 +1,91 @@
+ base_model: unsloth/gemma-2-9b
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ hub_model_id: NewEden/Erebus-Control-9B
+ hub_strategy: "all_checkpoints"
+ push_dataset_to_hub:
+ hf_use_auth_token: true
+
+ plugins:
+   - axolotl.integrations.liger.LigerPlugin
+ liger_rope: true
+ liger_rms_norm: true
+ liger_swiglu: true
+ #liger_cross_entropy: true
+ liger_fused_linear_cross_entropy: true
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   - path: Mielikki/Erebus-87k
+     type: completion
+     field: body
+ # - path: NewEden/4chan-smol-sharegpt
+ #   type: sharegpt
+ #   conversation: chatml
+ # - path: anthracite-org/kalo-opus-instruct-22k-no-refusal
+ #   type: sharegpt
+ #   conversation: chatml
+ # - path: NewEden/Claude-Instruct-2.7k
+ #   type: sharegpt
+ #   conversation: chatml
+ # - path: Epiculous/Synthstruct-Gens-v1.1-Filtered-n-Cleaned
+ #   type: sharegpt
+ #   conversation: chatml
+ # - path: NewEden/Claude-Instruct-5K
+ #   type: sharegpt
+ #   conversation: chatml
+ #chat_template: chatml
+ shuffle_merged_datasets: true
+ #default_system_message: "You are an assistant that responds to the user."
+ dataset_prepared_path: Control-9B
+ val_set_size: 0.0
+ output_dir: Control-9B
+
+ sequence_len: 32768
+ sample_packing: true
+ pad_to_sequence_len: true
+
+ wandb_project: 14b-erebus
+ wandb_entity:
+ wandb_watch:
+ wandb_name: r3-base-attempt-01
+ wandb_log_model:
+
+ gradient_accumulation_steps: 4
+ micro_batch_size: 1
+ num_epochs: 2
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.00001
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 5
+ evals_per_epoch:
+ eval_table_size:
+ eval_max_new_tokens:
+ saves_per_epoch: 2
+ debug:
+ deepspeed: deepspeed_configs/zero2.json
+ weight_decay: 0.01
+ fsdp:
+ fsdp_config:
+ special_tokens:
+   pad_token: <|endoftext|>
+   eos_token: <|endoftext|>
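
This first config trains unsloth/gemma-2-9b as a raw text-completion task on the body field of Mielikki/Erebus-87k; the ShareGPT datasets and chat template are commented out at this stage. As a minimal sketch (the exact CLI entry point may vary by Axolotl version, and this command is not part of the commit itself), a config like this is typically launched with:

accelerate launch -m axolotl.cli.train Erebus_Gemma_Control_Completion.yaml
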
Erebus_Gemma_control_Inst.yaml ADDED
@@ -0,0 +1,96 @@
+ base_model: NewEden/Erebus-Control-9B
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ hub_model_id: NewEden/Erebus-Control-9B-Final
+ hub_strategy: "all_checkpoints"
+ push_dataset_to_hub:
+ hf_use_auth_token: true
+
+ plugins:
+   - axolotl.integrations.liger.LigerPlugin
+ liger_rope: true
+ liger_rms_norm: true
+ liger_swiglu: true
+ #liger_cross_entropy: true
+ liger_fused_linear_cross_entropy: true
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   - path: NewEden/4chan-smol-sharegpt
+     type: sharegpt
+     conversation: chatml
+   - path: anthracite-org/kalo-opus-instruct-22k-no-refusal
+     type: sharegpt
+     conversation: chatml
+   - path: NewEden/Claude-Instruct-2.7k
+     type: sharegpt
+     conversation: chatml
+   - path: Epiculous/Synthstruct-Gens-v1.1-Filtered-n-Cleaned
+     type: sharegpt
+     conversation: chatml
+   - path: NewEden/Claude-Instruct-5K
+     type: sharegpt
+     conversation: chatml
+   - path: NewEden/OpenCAI-ShareGPT
+     type: sharegpt
+     conversation: chatml
+   - path: NewEden/Roleplay-Logs-Sharegpt-Ngram-cleaned
+     type: sharegpt
+     conversation: chatml
+   - path: NewEden/PIPPA-Mega-Filtered
+     type: sharegpt
+     conversation: chatml
+ #chat_template: chatml
+ shuffle_merged_datasets: true
+ #default_system_message: "You are an assistant that responds to the user."
+ dataset_prepared_path: Control-9B
+ val_set_size: 0.0
+ output_dir: Control-9B
+
+ sequence_len: 8192
+ sample_packing: true
+ pad_to_sequence_len: true
+
+ wandb_project: Erebus-Control-9b
+ wandb_entity:
+ wandb_watch:
+ wandb_name: attempt-1
+ wandb_log_model:
+
+ gradient_accumulation_steps: 4
+ micro_batch_size: 1
+ num_epochs: 4
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.00001
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 10
+ evals_per_epoch:
+ eval_table_size:
+ eval_max_new_tokens:
+ saves_per_epoch: 2
+ debug:
+ deepspeed: deepspeed_configs/zero2.json
+ weight_decay: 0.001
+ fsdp:
+ fsdp_config:
+ special_tokens:
+   pad_token: <pad>
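
This second config continues from the stage-one checkpoint (NewEden/Erebus-Control-9B) and fine-tunes it on ShareGPT-format instruct and roleplay datasets rendered with the chatml conversation template, at a reduced 8192-token sequence length. Assuming the same Axolotl setup as above, a sketch of the full two-stage pipeline would be:

# Stage 1: completion-style pretraining on Erebus-87k
accelerate launch -m axolotl.cli.train Erebus_Gemma_Control_Completion.yaml
# Stage 2: instruct SFT starting from the stage-1 checkpoint
accelerate launch -m axolotl.cli.train Erebus_Gemma_control_Inst.yaml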