lucyknada committed
Commit 28141a0
1 Parent(s): 1e86e36

Upload ./README.md with huggingface_hub

Files changed (1)
  1. README.md +175 -0

README.md ADDED
---
library_name: transformers
tags:
- generated_from_trainer
model-index:
- name: workspace/data/9b-fft-out
  results: []
---

### exl2 quant (measurement.json in main branch)
---
### check revisions for quants
---
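
The exl2 quants are published as separate revisions (branches) of the repository, with `measurement.json` kept in the main branch. Below is a minimal sketch of fetching one revision with `huggingface_hub`; the repository id and the `6.0bpw` branch name are placeholders, so check the actual revision list on the Hub first:

```python
from huggingface_hub import snapshot_download

# Download one exl2 quant branch; repo_id and revision are illustrative
# placeholders -- substitute the real repository id and an existing branch.
local_path = snapshot_download(
    repo_id="your-org/your-exl2-quant-repo",  # placeholder repo id
    revision="6.0bpw",                        # placeholder quant branch name
    local_dir="./9b-exl2-6.0bpw",
)
print(f"Quant downloaded to: {local_path}")
```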

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
<details><summary>See axolotl config</summary>

axolotl version: `0.4.1`
```yaml
base_model: /workspace/data/gemma-2-9b-chatml
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: false
liger_rms_norm: false
liger_swiglu: true
liger_cross_entropy: true
liger_fused_linear_cross_entropy: false

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: anthracite-core/c2_logs_16k_llama_v1.1
    type: sharegpt
    conversation: chatml
  - path: NewEden/Claude-Instruct-5K
    type: sharegpt
    conversation: chatml
  - path: anthracite-org/kalo-opus-instruct-22k-no-refusal
    type: sharegpt
    conversation: chatml
  - path: Epiculous/SynthRP-Gens-v1.1-Filtered-n-Cleaned
    type: sharegpt
    conversation: chatml
  - path: lodrick-the-lafted/kalo-opus-instruct-3k-filtered
    type: sharegpt
    conversation: chatml
  - path: anthracite-org/nopm_claude_writing_fixed
    type: sharegpt
    conversation: chatml
  - path: Epiculous/Synthstruct-Gens-v1.1-Filtered-n-Cleaned
    type: sharegpt
    conversation: chatml
  - path: anthracite-org/kalo_opus_misc_240827
    type: sharegpt
    conversation: chatml
  - path: anthracite-org/kalo_misc_part2
    type: sharegpt
    conversation: chatml
chat_template: chatml
shuffle_merged_datasets: false
default_system_message: "You are a helpful assistant that responds to the user."
dataset_prepared_path: /workspace/data/9b-fft-data
val_set_size: 0.0
output_dir: /workspace/data/9b-fft-out

sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

adapter:
lora_model_dir:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear:
lora_fan_in_fan_out:

wandb_project: 9b-Nemo-config-fft
wandb_entity:
wandb_watch:
wandb_name: attempt-01
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 1
num_epochs: 4
optimizer: paged_adamw_8bit
lr_scheduler: cosine
learning_rate: 0.00001

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
auto_resume_from_checkpoints: true
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch:
eval_table_size:
eval_max_new_tokens:
saves_per_epoch: 1
debug:
deepspeed: deepspeed_configs/zero3_bf16.json
weight_decay: 0.001
fsdp:
fsdp_config:
special_tokens:
  pad_token: <pad>

```

</details><br>
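
The config above formats every dataset as ChatML (`chat_template: chatml`) and trains with the default system message shown in `default_system_message`, in bf16. The following is a rough sketch of what inference with that template looks like through Transformers; the repository id is a placeholder, and the exl2 revisions themselves need an exl2-aware backend rather than plain Transformers:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repo id -- point this at the actual full-precision repository.
repo_id = "your-org/your-9b-chatml-finetune"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id, torch_dtype=torch.bfloat16, device_map="auto"
)

# ChatML-style conversation, using the default system message from the config.
messages = [
    {"role": "system", "content": "You are a helpful assistant that responds to the user."},
    {"role": "user", "content": "Write a two-sentence story about a lighthouse."},
]

# apply_chat_template renders the ChatML prompt and appends the assistant turn marker.
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(inputs, max_new_tokens=256, do_sample=True, temperature=0.8)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```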

# workspace/data/9b-fft-out

This model is a full-parameter fine-tune of the `gemma-2-9b-chatml` base model; see the Axolotl config above for the exact training setup.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

The training data consists of the ChatML-formatted conversation datasets listed in the Axolotl config above; no validation split was held out (`val_set_size: 0.0`).

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- gradient_accumulation_steps: 4
- total_train_batch_size: 32
- total_eval_batch_size: 8
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 10
- num_epochs: 4
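
For reference, the reported total train batch size is just the per-device batch size scaled by gradient accumulation and the number of GPUs:

$$
\text{total\_train\_batch\_size} = \text{micro\_batch\_size} \times \text{gradient\_accumulation\_steps} \times \text{num\_devices} = 1 \times 4 \times 8 = 32
$$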

### Training results



### Framework versions

- Transformers 4.45.0.dev0
- Pytorch 2.3.1+cu121
- Datasets 2.21.0
- Tokenizers 0.19.1
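
A minimal, purely illustrative sketch for checking that a local environment matches the versions reported above:

```python
# Compare installed library versions against the training environment above.
import datasets
import tokenizers
import torch
import transformers

expected = {
    "transformers": "4.45.0.dev0",
    "torch": "2.3.1+cu121",
    "datasets": "2.21.0",
    "tokenizers": "0.19.1",
}
installed = {
    "transformers": transformers.__version__,
    "torch": torch.__version__,
    "datasets": datasets.__version__,
    "tokenizers": tokenizers.__version__,
}
for name, want in expected.items():
    have = installed[name]
    marker = "OK  " if have == want else "DIFF"
    print(f"[{marker}] {name}: installed {have}, trained with {want}")
```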