crypty committed on
Commit
32b16c7
1 Parent(s): 2e2d4a6

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,135 @@
1
+ ---
2
+ base_model: openlm-research/open_llama_3b_v2
3
+ library_name: peft
4
+ license: apache-2.0
5
+ tags:
6
+ - generated_from_trainer
7
+ model-index:
8
+ - name: outputs/qlora_short-out
9
+ results: []
10
+ ---
11
+
12
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
13
+ should probably proofread and complete it, then remove this comment. -->
14
+
15
+ [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
16
+ <details><summary>See axolotl config</summary>
17
+
18
+ axolotl version: `0.4.1`
19
+ ```yaml
20
+ base_model: openlm-research/open_llama_3b_v2
21
+ model_type: LlamaForCausalLM
22
+ tokenizer_type: LlamaTokenizer
23
+ load_in_8bit: false
24
+ load_in_4bit: true
25
+ strict: false
26
+ push_dataset_to_hub:
27
+ datasets:
28
+ - path: mhenrichsen/alpaca_2k_test
29
+ type: alpaca
30
+ dataset_prepared_path:
31
+ val_set_size: 0.05
32
+ adapter: qlora
33
+ lora_model_dir:
34
+ sequence_len: 1024
35
+ sample_packing: true
36
+ lora_r: 8
37
+ lora_alpha: 32
38
+ lora_dropout: 0.05
39
+ lora_target_modules:
40
+ lora_target_linear: true
41
+ lora_fan_in_fan_out:
42
+ wandb_project:
43
+ wandb_entity:
44
+ wandb_watch:
45
+ wandb_name:
46
+ wandb_log_model:
47
+ output_dir: ./outputs/qlora_short-out
48
+ gradient_accumulation_steps: 1
49
+ micro_batch_size: 1
50
+ num_epochs: 1
51
+ optimizer: paged_adamw_32bit
52
+ torchdistx_path:
53
+ lr_scheduler: cosine
54
+ learning_rate: 0.0002
55
+ train_on_inputs: false
56
+ group_by_length: false
57
+ bf16: false
58
+ fp16: true
59
+ tf32: false
60
+ gradient_checkpointing: true
61
+ early_stopping_patience:
62
+ resume_from_checkpoint:
63
+ local_rank:
64
+ logging_steps: 1
65
+ xformers_attention:
66
+ flash_attention: true
67
+ gptq_groupsize:
68
+ gptq_model_v1:
69
+ warmup_steps: 20
70
+ evals_per_epoch: 4
71
+ saves_per_epoch: 1
72
+ debug:
73
+ deepspeed:
74
+ weight_decay: 0.1
75
+ fsdp:
76
+ fsdp_config:
77
+ special_tokens:
78
+ bos_token: "<s>"
79
+ eos_token: "</s>"
80
+ unk_token: "<unk>"
81
+
82
+ ```
83
+
84
+ </details><br>
85
+
86
+ # outputs/qlora_short-out
87
+
88
+ This model is a fine-tuned version of [openlm-research/open_llama_3b_v2](https://huggingface.co/openlm-research/open_llama_3b_v2) on the mhenrichsen/alpaca_2k_test dataset (see the axolotl config above).
89
+ It achieves the following results on the evaluation set:
90
+ - Loss: 1.1098
91
+
92
+ ## Model description
93
+
94
+ More information needed
95
+
96
+ ## Intended uses & limitations
97
+
98
+ More information needed
99
+
100
+ ## Training and evaluation data
101
+
102
+ More information needed
103
+
104
+ ## Training procedure
105
+
106
+ ### Training hyperparameters
107
+
108
+ The following hyperparameters were used during training:
109
+ - learning_rate: 0.0002
110
+ - train_batch_size: 2
111
+ - eval_batch_size: 2
112
+ - seed: 42
113
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
114
+ - lr_scheduler_type: cosine
115
+ - lr_scheduler_warmup_steps: 20
116
+ - num_epochs: 1
117
+ - mixed_precision_training: Native AMP
118
+
119
+ ### Training results
120
+
121
+ | Training Loss | Epoch | Step | Validation Loss |
122
+ |:-------------:|:------:|:----:|:---------------:|
123
+ | 1.458 | 0.0023 | 1 | 1.3469 |
124
+ | 1.0411 | 0.2511 | 110 | 1.1380 |
125
+ | 1.143 | 0.5023 | 220 | 1.1200 |
126
+ | 1.2744 | 0.7534 | 330 | 1.1098 |
127
+
128
+
129
+ ### Framework versions
130
+
131
+ - PEFT 0.12.0
132
+ - Transformers 4.44.2
133
+ - Pytorch 2.3.1+cu121
134
+ - Datasets 2.20.0
135
+ - Tokenizers 0.19.1
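
For reference, here is a minimal Python sketch of how this QLoRA adapter could be loaded on top of the 4-bit quantized base model, matching the settings in the card (`load_in_4bit`, `base_model`, alpaca-style data). The adapter repo id below is hypothetical; substitute the id this folder was actually uploaded to.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "openlm-research/open_llama_3b_v2"
adapter_id = "crypty/qlora_short-out"  # hypothetical repo id; point this at the uploaded adapter

# Quantize the base model to 4-bit, mirroring `load_in_4bit: true` in the axolotl config
bnb = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, quantization_config=bnb, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id)  # attach the LoRA adapter weights

# Alpaca-style prompt, since the dataset type in the config is "alpaca"
prompt = "### Instruction:\nSummarize what QLoRA fine-tuning does.\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```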
adapter_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "openlm-research/open_llama_3b_v2",
5
+ "bias": "none",
6
+ "fan_in_fan_out": null,
7
+ "inference_mode": false,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 32,
14
+ "lora_dropout": 0.05,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 8,
20
+ "rank_pattern": {},
21
+ "revision": null,
22
+ "target_modules": [
23
+ "gate_proj",
24
+ "o_proj",
25
+ "v_proj",
26
+ "k_proj",
27
+ "down_proj",
28
+ "q_proj",
29
+ "up_proj"
30
+ ],
31
+ "task_type": "CAUSAL_LM",
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
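
The JSON above maps directly onto PEFT's `LoraConfig`. A sketch of the equivalent configuration in Python (field values taken from the file; this is illustrative, not how the file was generated):

```python
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                 # LoRA rank ("r" above)
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "gate_proj", "o_proj", "v_proj", "k_proj",
        "down_proj", "q_proj", "up_proj",
    ],
)
```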
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:41b3ffaaa8d6164c978ed5b2e7e7c4777cf1a1a966ea60f24f860e6eb831430c
3
+ size 50982842
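
adapter_model.bin is an LFS pointer to roughly 51 MB of LoRA weights; the full base model is not stored in this repo. If a standalone checkpoint is wanted, the adapter can be merged into the base weights with PEFT. A sketch, again using a hypothetical adapter repo id:

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("openlm-research/open_llama_3b_v2", torch_dtype="auto")
merged = PeftModel.from_pretrained(base, "crypty/qlora_short-out").merge_and_unload()  # hypothetical repo id
merged.save_pretrained("open_llama_3b_v2-qlora_short-merged")  # writes the full merged weights
```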
checkpoint-438/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: openlm-research/open_llama_3b_v2
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.12.0
checkpoint-438/adapter_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "openlm-research/open_llama_3b_v2",
5
+ "bias": "none",
6
+ "fan_in_fan_out": null,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 32,
14
+ "lora_dropout": 0.05,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 8,
20
+ "rank_pattern": {},
21
+ "revision": null,
22
+ "target_modules": [
23
+ "up_proj",
24
+ "q_proj",
25
+ "down_proj",
26
+ "k_proj",
27
+ "v_proj",
28
+ "gate_proj",
29
+ "o_proj"
30
+ ],
31
+ "task_type": "CAUSAL_LM",
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
checkpoint-438/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74f2ae59c1641654bb27a8af372474e146f2ef03a76956fc0c0c0e5a4ec26103
3
+ size 50899792
checkpoint-438/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae65f2658b87b30251913442068bb66bf5a3c590208cb407326829a717c39855
3
+ size 101919290
checkpoint-438/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c1c298c1326468cd03257fc363b89ee3630aa9bad95a34029f748a06e23ac4c
3
+ size 14244
checkpoint-438/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:013116bfde523d756267ebeb7c4f5a0f122d4861f97d62af0a4612a3a9fefa90
3
+ size 1064
checkpoint-438/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "</s>",
17
+ "unk_token": {
18
+ "content": "<unk>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
checkpoint-438/tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91b289e85fa20fd375d8b33dc12f77616f18abc6359804471d1fafcb425fecb8
3
+ size 511574
checkpoint-438/tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": true,
5
+ "added_tokens_decoder": {
6
+ "0": {
7
+ "content": "<unk>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false,
12
+ "special": true
13
+ },
14
+ "1": {
15
+ "content": "<s>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false,
20
+ "special": true
21
+ },
22
+ "2": {
23
+ "content": "</s>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": false,
27
+ "single_word": false,
28
+ "special": true
29
+ }
30
+ },
31
+ "bos_token": "<s>",
32
+ "clean_up_tokenization_spaces": false,
33
+ "eos_token": "</s>",
34
+ "legacy": true,
35
+ "model_max_length": 2048,
36
+ "pad_token": "</s>",
37
+ "sp_model_kwargs": {},
38
+ "spaces_between_special_tokens": false,
39
+ "tokenizer_class": "LlamaTokenizer",
40
+ "unk_token": "<unk>",
41
+ "use_default_system_prompt": false,
42
+ "use_fast": true
43
+ }
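
The tokenizer settings above (`add_bos_token: true`, pad token reused as `</s>`, no added tokens beyond the standard Llama specials) can be checked with a short sketch:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("openlm-research/open_llama_3b_v2")
tok.pad_token = tok.eos_token  # mirrors "pad_token": "</s>" in the config above

ids = tok("Hello world").input_ids
print(ids[0] == tok.bos_token_id)    # True: add_bos_token prepends <s>
print(tok.pad_token, tok.eos_token)  # </s> </s>
```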
checkpoint-438/trainer_state.json ADDED
@@ -0,0 +1,3131 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
+ "eval_steps": 110,
6
+ "global_step": 438,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.00228310502283105,
13
+ "grad_norm": 49763.3828125,
14
+ "learning_rate": 1e-05,
15
+ "loss": 1.458,
16
+ "step": 1
17
+ },
18
+ {
19
+ "epoch": 0.00228310502283105,
20
+ "eval_loss": 1.3469293117523193,
21
+ "eval_runtime": 5.899,
22
+ "eval_samples_per_second": 16.952,
23
+ "eval_steps_per_second": 8.476,
24
+ "step": 1
25
+ },
26
+ {
27
+ "epoch": 0.0045662100456621,
28
+ "grad_norm": 47645.4765625,
29
+ "learning_rate": 2e-05,
30
+ "loss": 1.663,
31
+ "step": 2
32
+ },
33
+ {
34
+ "epoch": 0.00684931506849315,
35
+ "grad_norm": 53930.67578125,
36
+ "learning_rate": 3e-05,
37
+ "loss": 1.2375,
38
+ "step": 3
39
+ },
40
+ {
41
+ "epoch": 0.0091324200913242,
42
+ "grad_norm": 79495.359375,
43
+ "learning_rate": 4e-05,
44
+ "loss": 1.4099,
45
+ "step": 4
46
+ },
47
+ {
48
+ "epoch": 0.01141552511415525,
49
+ "grad_norm": 51649.53125,
50
+ "learning_rate": 5e-05,
51
+ "loss": 1.3848,
52
+ "step": 5
53
+ },
54
+ {
55
+ "epoch": 0.0136986301369863,
56
+ "grad_norm": 62501.76171875,
57
+ "learning_rate": 6e-05,
58
+ "loss": 1.3113,
59
+ "step": 6
60
+ },
61
+ {
62
+ "epoch": 0.01598173515981735,
63
+ "grad_norm": 56830.796875,
64
+ "learning_rate": 7e-05,
65
+ "loss": 1.394,
66
+ "step": 7
67
+ },
68
+ {
69
+ "epoch": 0.0182648401826484,
70
+ "grad_norm": 69029.3671875,
71
+ "learning_rate": 8e-05,
72
+ "loss": 1.1999,
73
+ "step": 8
74
+ },
75
+ {
76
+ "epoch": 0.02054794520547945,
77
+ "grad_norm": 77872.7421875,
78
+ "learning_rate": 9e-05,
79
+ "loss": 1.4121,
80
+ "step": 9
81
+ },
82
+ {
83
+ "epoch": 0.0228310502283105,
84
+ "grad_norm": 67018.5234375,
85
+ "learning_rate": 0.0001,
86
+ "loss": 1.5292,
87
+ "step": 10
88
+ },
89
+ {
90
+ "epoch": 0.02511415525114155,
91
+ "grad_norm": 52883.80078125,
92
+ "learning_rate": 0.00011000000000000002,
93
+ "loss": 1.1904,
94
+ "step": 11
95
+ },
96
+ {
97
+ "epoch": 0.0273972602739726,
98
+ "grad_norm": 89700.34375,
99
+ "learning_rate": 0.00012,
100
+ "loss": 1.6428,
101
+ "step": 12
102
+ },
103
+ {
104
+ "epoch": 0.02968036529680365,
105
+ "grad_norm": 70959.6875,
106
+ "learning_rate": 0.00013000000000000002,
107
+ "loss": 1.8412,
108
+ "step": 13
109
+ },
110
+ {
111
+ "epoch": 0.0319634703196347,
112
+ "grad_norm": 123958.625,
113
+ "learning_rate": 0.00014,
114
+ "loss": 1.3549,
115
+ "step": 14
116
+ },
117
+ {
118
+ "epoch": 0.03424657534246575,
119
+ "grad_norm": 55201.86328125,
120
+ "learning_rate": 0.00015000000000000001,
121
+ "loss": 0.717,
122
+ "step": 15
123
+ },
124
+ {
125
+ "epoch": 0.0365296803652968,
126
+ "grad_norm": 79495.921875,
127
+ "learning_rate": 0.00016,
128
+ "loss": 2.5554,
129
+ "step": 16
130
+ },
131
+ {
132
+ "epoch": 0.03881278538812785,
133
+ "grad_norm": 94823.359375,
134
+ "learning_rate": 0.00017,
135
+ "loss": 1.3731,
136
+ "step": 17
137
+ },
138
+ {
139
+ "epoch": 0.0410958904109589,
140
+ "grad_norm": 78739.7890625,
141
+ "learning_rate": 0.00018,
142
+ "loss": 0.9813,
143
+ "step": 18
144
+ },
145
+ {
146
+ "epoch": 0.04337899543378995,
147
+ "grad_norm": 70018.5234375,
148
+ "learning_rate": 0.00019,
149
+ "loss": 1.3944,
150
+ "step": 19
151
+ },
152
+ {
153
+ "epoch": 0.045662100456621,
154
+ "grad_norm": 93789.5625,
155
+ "learning_rate": 0.0002,
156
+ "loss": 1.3604,
157
+ "step": 20
158
+ },
159
+ {
160
+ "epoch": 0.04794520547945205,
161
+ "grad_norm": 69996.328125,
162
+ "learning_rate": 0.0001999971756719333,
163
+ "loss": 0.7975,
164
+ "step": 21
165
+ },
166
+ {
167
+ "epoch": 0.0502283105022831,
168
+ "grad_norm": 62440.68359375,
169
+ "learning_rate": 0.00019998870284726968,
170
+ "loss": 1.3632,
171
+ "step": 22
172
+ },
173
+ {
174
+ "epoch": 0.05251141552511415,
175
+ "grad_norm": 57081.87890625,
176
+ "learning_rate": 0.00019997458200460993,
177
+ "loss": 1.181,
178
+ "step": 23
179
+ },
180
+ {
181
+ "epoch": 0.0547945205479452,
182
+ "grad_norm": 68026.21875,
183
+ "learning_rate": 0.00019995481394159188,
184
+ "loss": 1.2883,
185
+ "step": 24
186
+ },
187
+ {
188
+ "epoch": 0.05707762557077625,
189
+ "grad_norm": 68773.109375,
190
+ "learning_rate": 0.0001999293997748454,
191
+ "loss": 1.209,
192
+ "step": 25
193
+ },
194
+ {
195
+ "epoch": 0.0593607305936073,
196
+ "grad_norm": 54953.078125,
197
+ "learning_rate": 0.00019989834093992945,
198
+ "loss": 1.2207,
199
+ "step": 26
200
+ },
201
+ {
202
+ "epoch": 0.06164383561643835,
203
+ "grad_norm": 68915.734375,
204
+ "learning_rate": 0.00019986163919125075,
205
+ "loss": 1.1395,
206
+ "step": 27
207
+ },
208
+ {
209
+ "epoch": 0.0639269406392694,
210
+ "grad_norm": 77723.328125,
211
+ "learning_rate": 0.00019981929660196492,
212
+ "loss": 1.4178,
213
+ "step": 28
214
+ },
215
+ {
216
+ "epoch": 0.06621004566210045,
217
+ "grad_norm": 57558.64453125,
218
+ "learning_rate": 0.0001997713155638592,
219
+ "loss": 1.3991,
220
+ "step": 29
221
+ },
222
+ {
223
+ "epoch": 0.0684931506849315,
224
+ "grad_norm": 57487.20703125,
225
+ "learning_rate": 0.00019971769878721743,
226
+ "loss": 1.2361,
227
+ "step": 30
228
+ },
229
+ {
230
+ "epoch": 0.07077625570776255,
231
+ "grad_norm": 56958.12109375,
232
+ "learning_rate": 0.000199658449300667,
233
+ "loss": 1.0886,
234
+ "step": 31
235
+ },
236
+ {
237
+ "epoch": 0.0730593607305936,
238
+ "grad_norm": 69826.890625,
239
+ "learning_rate": 0.00019959357045100764,
240
+ "loss": 1.2462,
241
+ "step": 32
242
+ },
243
+ {
244
+ "epoch": 0.07534246575342465,
245
+ "grad_norm": 83606.59375,
246
+ "learning_rate": 0.00019952306590302247,
247
+ "loss": 1.5512,
248
+ "step": 33
249
+ },
250
+ {
251
+ "epoch": 0.0776255707762557,
252
+ "grad_norm": 96047.1171875,
253
+ "learning_rate": 0.00019944693963927092,
254
+ "loss": 1.2642,
255
+ "step": 34
256
+ },
257
+ {
258
+ "epoch": 0.07990867579908675,
259
+ "grad_norm": 75340.171875,
260
+ "learning_rate": 0.00019936519595986394,
261
+ "loss": 1.3945,
262
+ "step": 35
263
+ },
264
+ {
265
+ "epoch": 0.0821917808219178,
266
+ "grad_norm": 53603.67578125,
267
+ "learning_rate": 0.00019927783948222084,
268
+ "loss": 0.8141,
269
+ "step": 36
270
+ },
271
+ {
272
+ "epoch": 0.08447488584474885,
273
+ "grad_norm": 60950.3359375,
274
+ "learning_rate": 0.00019918487514080865,
275
+ "loss": 1.1856,
276
+ "step": 37
277
+ },
278
+ {
279
+ "epoch": 0.0867579908675799,
280
+ "grad_norm": 57694.49609375,
281
+ "learning_rate": 0.00019908630818686338,
282
+ "loss": 0.5208,
283
+ "step": 38
284
+ },
285
+ {
286
+ "epoch": 0.08904109589041095,
287
+ "grad_norm": 83846.890625,
288
+ "learning_rate": 0.0001989821441880933,
289
+ "loss": 1.2401,
290
+ "step": 39
291
+ },
292
+ {
293
+ "epoch": 0.091324200913242,
294
+ "grad_norm": 60294.91796875,
295
+ "learning_rate": 0.00019887238902836448,
296
+ "loss": 1.3362,
297
+ "step": 40
298
+ },
299
+ {
300
+ "epoch": 0.09360730593607305,
301
+ "grad_norm": 55908.76953125,
302
+ "learning_rate": 0.00019875704890736853,
303
+ "loss": 1.0295,
304
+ "step": 41
305
+ },
306
+ {
307
+ "epoch": 0.0958904109589041,
308
+ "grad_norm": 79842.359375,
309
+ "learning_rate": 0.00019863613034027224,
310
+ "loss": 1.3764,
311
+ "step": 42
312
+ },
313
+ {
314
+ "epoch": 0.09817351598173515,
315
+ "grad_norm": 53915.5703125,
316
+ "learning_rate": 0.0001985096401573497,
317
+ "loss": 1.1399,
318
+ "step": 43
319
+ },
320
+ {
321
+ "epoch": 0.1004566210045662,
322
+ "grad_norm": 56798.44921875,
323
+ "learning_rate": 0.00019837758550359636,
324
+ "loss": 0.9945,
325
+ "step": 44
326
+ },
327
+ {
328
+ "epoch": 0.10273972602739725,
329
+ "grad_norm": 72487.9921875,
330
+ "learning_rate": 0.0001982399738383255,
331
+ "loss": 1.4328,
332
+ "step": 45
333
+ },
334
+ {
335
+ "epoch": 0.1050228310502283,
336
+ "grad_norm": 56317.5546875,
337
+ "learning_rate": 0.00019809681293474693,
338
+ "loss": 1.2217,
339
+ "step": 46
340
+ },
341
+ {
342
+ "epoch": 0.10730593607305935,
343
+ "grad_norm": 108634.3203125,
344
+ "learning_rate": 0.0001979481108795278,
345
+ "loss": 1.5159,
346
+ "step": 47
347
+ },
348
+ {
349
+ "epoch": 0.1095890410958904,
350
+ "grad_norm": 77436.7578125,
351
+ "learning_rate": 0.00019779387607233586,
352
+ "loss": 1.051,
353
+ "step": 48
354
+ },
355
+ {
356
+ "epoch": 0.11187214611872145,
357
+ "grad_norm": 61779.69921875,
358
+ "learning_rate": 0.00019763411722536502,
359
+ "loss": 1.2487,
360
+ "step": 49
361
+ },
362
+ {
363
+ "epoch": 0.1141552511415525,
364
+ "grad_norm": 74456.3359375,
365
+ "learning_rate": 0.00019746884336284317,
366
+ "loss": 1.5362,
367
+ "step": 50
368
+ },
369
+ {
370
+ "epoch": 0.11643835616438356,
371
+ "grad_norm": 65273.90625,
372
+ "learning_rate": 0.00019729806382052248,
373
+ "loss": 1.125,
374
+ "step": 51
375
+ },
376
+ {
377
+ "epoch": 0.1187214611872146,
378
+ "grad_norm": 74326.5390625,
379
+ "learning_rate": 0.00019712178824515212,
380
+ "loss": 1.4872,
381
+ "step": 52
382
+ },
383
+ {
384
+ "epoch": 0.12100456621004566,
385
+ "grad_norm": 66311.375,
386
+ "learning_rate": 0.00019694002659393305,
387
+ "loss": 1.3129,
388
+ "step": 53
389
+ },
390
+ {
391
+ "epoch": 0.1232876712328767,
392
+ "grad_norm": 93956.140625,
393
+ "learning_rate": 0.00019675278913395606,
394
+ "loss": 1.1963,
395
+ "step": 54
396
+ },
397
+ {
398
+ "epoch": 0.12557077625570776,
399
+ "grad_norm": 54108.7421875,
400
+ "learning_rate": 0.0001965600864416213,
401
+ "loss": 1.3549,
402
+ "step": 55
403
+ },
404
+ {
405
+ "epoch": 0.1278538812785388,
406
+ "grad_norm": 82672.5234375,
407
+ "learning_rate": 0.00019636192940204134,
408
+ "loss": 1.3415,
409
+ "step": 56
410
+ },
411
+ {
412
+ "epoch": 0.13013698630136986,
413
+ "grad_norm": 79705.0234375,
414
+ "learning_rate": 0.00019615832920842586,
415
+ "loss": 1.0711,
416
+ "step": 57
417
+ },
418
+ {
419
+ "epoch": 0.1324200913242009,
420
+ "grad_norm": 66569.171875,
421
+ "learning_rate": 0.00019594929736144976,
422
+ "loss": 1.5752,
423
+ "step": 58
424
+ },
425
+ {
426
+ "epoch": 0.13470319634703196,
427
+ "grad_norm": 54818.6953125,
428
+ "learning_rate": 0.0001957348456686032,
429
+ "loss": 1.2086,
430
+ "step": 59
431
+ },
432
+ {
433
+ "epoch": 0.136986301369863,
434
+ "grad_norm": 84023.5625,
435
+ "learning_rate": 0.00019551498624352496,
436
+ "loss": 1.2713,
437
+ "step": 60
438
+ },
439
+ {
440
+ "epoch": 0.13926940639269406,
441
+ "grad_norm": 58488.77734375,
442
+ "learning_rate": 0.00019528973150531787,
443
+ "loss": 1.1957,
444
+ "step": 61
445
+ },
446
+ {
447
+ "epoch": 0.1415525114155251,
448
+ "grad_norm": 59256.1328125,
449
+ "learning_rate": 0.00019505909417784754,
450
+ "loss": 1.1863,
451
+ "step": 62
452
+ },
453
+ {
454
+ "epoch": 0.14383561643835616,
455
+ "grad_norm": 58009.8359375,
456
+ "learning_rate": 0.00019482308728902356,
457
+ "loss": 1.0046,
458
+ "step": 63
459
+ },
460
+ {
461
+ "epoch": 0.1461187214611872,
462
+ "grad_norm": 58970.30859375,
463
+ "learning_rate": 0.00019458172417006347,
464
+ "loss": 1.3599,
465
+ "step": 64
466
+ },
467
+ {
468
+ "epoch": 0.14840182648401826,
469
+ "grad_norm": 75271.78125,
470
+ "learning_rate": 0.00019433501845473995,
471
+ "loss": 1.175,
472
+ "step": 65
473
+ },
474
+ {
475
+ "epoch": 0.1506849315068493,
476
+ "grad_norm": 60125.44140625,
477
+ "learning_rate": 0.00019408298407861042,
478
+ "loss": 1.2096,
479
+ "step": 66
480
+ },
481
+ {
482
+ "epoch": 0.15296803652968036,
483
+ "grad_norm": 62565.88671875,
484
+ "learning_rate": 0.00019382563527823026,
485
+ "loss": 1.0284,
486
+ "step": 67
487
+ },
488
+ {
489
+ "epoch": 0.1552511415525114,
490
+ "grad_norm": 64562.3359375,
491
+ "learning_rate": 0.00019356298659034817,
492
+ "loss": 1.1955,
493
+ "step": 68
494
+ },
495
+ {
496
+ "epoch": 0.15753424657534246,
497
+ "grad_norm": 61627.109375,
498
+ "learning_rate": 0.00019329505285108542,
499
+ "loss": 1.1498,
500
+ "step": 69
501
+ },
502
+ {
503
+ "epoch": 0.1598173515981735,
504
+ "grad_norm": 65598.3515625,
505
+ "learning_rate": 0.00019302184919509755,
506
+ "loss": 1.046,
507
+ "step": 70
508
+ },
509
+ {
510
+ "epoch": 0.16210045662100456,
511
+ "grad_norm": 56694.41015625,
512
+ "learning_rate": 0.00019274339105471971,
513
+ "loss": 0.6779,
514
+ "step": 71
515
+ },
516
+ {
517
+ "epoch": 0.1643835616438356,
518
+ "grad_norm": 82800.1015625,
519
+ "learning_rate": 0.00019245969415909465,
520
+ "loss": 1.2381,
521
+ "step": 72
522
+ },
523
+ {
524
+ "epoch": 0.16666666666666666,
525
+ "grad_norm": 44540.94140625,
526
+ "learning_rate": 0.00019217077453328449,
527
+ "loss": 1.0871,
528
+ "step": 73
529
+ },
530
+ {
531
+ "epoch": 0.1689497716894977,
532
+ "grad_norm": 62427.3125,
533
+ "learning_rate": 0.0001918766484973654,
534
+ "loss": 1.4182,
535
+ "step": 74
536
+ },
537
+ {
538
+ "epoch": 0.17123287671232876,
539
+ "grad_norm": 62395.83203125,
540
+ "learning_rate": 0.00019157733266550575,
541
+ "loss": 1.1391,
542
+ "step": 75
543
+ },
544
+ {
545
+ "epoch": 0.1735159817351598,
546
+ "grad_norm": 86479.0546875,
547
+ "learning_rate": 0.0001912728439450276,
548
+ "loss": 1.4769,
549
+ "step": 76
550
+ },
551
+ {
552
+ "epoch": 0.17579908675799086,
553
+ "grad_norm": 52077.796875,
554
+ "learning_rate": 0.00019096319953545185,
555
+ "loss": 0.9951,
556
+ "step": 77
557
+ },
558
+ {
559
+ "epoch": 0.1780821917808219,
560
+ "grad_norm": 65181.8671875,
561
+ "learning_rate": 0.0001906484169275263,
562
+ "loss": 1.1435,
563
+ "step": 78
564
+ },
565
+ {
566
+ "epoch": 0.18036529680365296,
567
+ "grad_norm": 61927.88671875,
568
+ "learning_rate": 0.00019032851390223812,
569
+ "loss": 1.3638,
570
+ "step": 79
571
+ },
572
+ {
573
+ "epoch": 0.182648401826484,
574
+ "grad_norm": 65557.1796875,
575
+ "learning_rate": 0.00019000350852980909,
576
+ "loss": 1.0751,
577
+ "step": 80
578
+ },
579
+ {
580
+ "epoch": 0.18493150684931506,
581
+ "grad_norm": 67026.625,
582
+ "learning_rate": 0.00018967341916867518,
583
+ "loss": 1.4392,
584
+ "step": 81
585
+ },
586
+ {
587
+ "epoch": 0.1872146118721461,
588
+ "grad_norm": 65608.7578125,
589
+ "learning_rate": 0.00018933826446444933,
590
+ "loss": 1.4204,
591
+ "step": 82
592
+ },
593
+ {
594
+ "epoch": 0.18949771689497716,
595
+ "grad_norm": 62439.32421875,
596
+ "learning_rate": 0.0001889980633488683,
597
+ "loss": 1.6256,
598
+ "step": 83
599
+ },
600
+ {
601
+ "epoch": 0.1917808219178082,
602
+ "grad_norm": 58315.05078125,
603
+ "learning_rate": 0.00018865283503872324,
604
+ "loss": 1.3327,
605
+ "step": 84
606
+ },
607
+ {
608
+ "epoch": 0.19406392694063926,
609
+ "grad_norm": 63276.6953125,
610
+ "learning_rate": 0.00018830259903477426,
611
+ "loss": 1.3005,
612
+ "step": 85
613
+ },
614
+ {
615
+ "epoch": 0.1963470319634703,
616
+ "grad_norm": 60068.3125,
617
+ "learning_rate": 0.0001879473751206489,
618
+ "loss": 1.3073,
619
+ "step": 86
620
+ },
621
+ {
622
+ "epoch": 0.19863013698630136,
623
+ "grad_norm": 60654.34375,
624
+ "learning_rate": 0.0001875871833617246,
625
+ "loss": 1.1668,
626
+ "step": 87
627
+ },
628
+ {
629
+ "epoch": 0.2009132420091324,
630
+ "grad_norm": 49492.6875,
631
+ "learning_rate": 0.0001872220441039952,
632
+ "loss": 0.8938,
633
+ "step": 88
634
+ },
635
+ {
636
+ "epoch": 0.20319634703196346,
637
+ "grad_norm": 82807.6953125,
638
+ "learning_rate": 0.0001868519779729218,
639
+ "loss": 1.014,
640
+ "step": 89
641
+ },
642
+ {
643
+ "epoch": 0.2054794520547945,
644
+ "grad_norm": 50824.89453125,
645
+ "learning_rate": 0.0001864770058722676,
646
+ "loss": 1.0941,
647
+ "step": 90
648
+ },
649
+ {
650
+ "epoch": 0.20776255707762556,
651
+ "grad_norm": 53105.984375,
652
+ "learning_rate": 0.00018609714898291718,
653
+ "loss": 0.7681,
654
+ "step": 91
655
+ },
656
+ {
657
+ "epoch": 0.2100456621004566,
658
+ "grad_norm": 57766.73046875,
659
+ "learning_rate": 0.00018571242876167996,
660
+ "loss": 1.0531,
661
+ "step": 92
662
+ },
663
+ {
664
+ "epoch": 0.21232876712328766,
665
+ "grad_norm": 66334.5625,
666
+ "learning_rate": 0.0001853228669400784,
667
+ "loss": 1.2699,
668
+ "step": 93
669
+ },
670
+ {
671
+ "epoch": 0.2146118721461187,
672
+ "grad_norm": 54520.6015625,
673
+ "learning_rate": 0.00018492848552312014,
674
+ "loss": 1.4723,
675
+ "step": 94
676
+ },
677
+ {
678
+ "epoch": 0.21689497716894976,
679
+ "grad_norm": 75962.671875,
680
+ "learning_rate": 0.00018452930678805536,
681
+ "loss": 1.379,
682
+ "step": 95
683
+ },
684
+ {
685
+ "epoch": 0.2191780821917808,
686
+ "grad_norm": 57191.44921875,
687
+ "learning_rate": 0.00018412535328311814,
688
+ "loss": 1.3189,
689
+ "step": 96
690
+ },
691
+ {
692
+ "epoch": 0.22146118721461186,
693
+ "grad_norm": 63262.0625,
694
+ "learning_rate": 0.00018371664782625287,
695
+ "loss": 0.9332,
696
+ "step": 97
697
+ },
698
+ {
699
+ "epoch": 0.2237442922374429,
700
+ "grad_norm": 55938.12890625,
701
+ "learning_rate": 0.00018330321350382544,
702
+ "loss": 1.3675,
703
+ "step": 98
704
+ },
705
+ {
706
+ "epoch": 0.22602739726027396,
707
+ "grad_norm": 48929.921875,
708
+ "learning_rate": 0.00018288507366931905,
709
+ "loss": 1.0751,
710
+ "step": 99
711
+ },
712
+ {
713
+ "epoch": 0.228310502283105,
714
+ "grad_norm": 60707.87890625,
715
+ "learning_rate": 0.00018246225194201517,
716
+ "loss": 1.2708,
717
+ "step": 100
718
+ },
719
+ {
720
+ "epoch": 0.23059360730593606,
721
+ "grad_norm": 59498.01171875,
722
+ "learning_rate": 0.00018203477220565912,
723
+ "loss": 1.3661,
724
+ "step": 101
725
+ },
726
+ {
727
+ "epoch": 0.2328767123287671,
728
+ "grad_norm": 52212.64453125,
729
+ "learning_rate": 0.00018160265860711134,
730
+ "loss": 0.9827,
731
+ "step": 102
732
+ },
733
+ {
734
+ "epoch": 0.23515981735159816,
735
+ "grad_norm": 47361.78515625,
736
+ "learning_rate": 0.00018116593555498307,
737
+ "loss": 1.0684,
738
+ "step": 103
739
+ },
740
+ {
741
+ "epoch": 0.2374429223744292,
742
+ "grad_norm": 63993.5078125,
743
+ "learning_rate": 0.0001807246277182578,
744
+ "loss": 1.2669,
745
+ "step": 104
746
+ },
747
+ {
748
+ "epoch": 0.23972602739726026,
749
+ "grad_norm": 55152.2578125,
750
+ "learning_rate": 0.0001802787600248977,
751
+ "loss": 0.9088,
752
+ "step": 105
753
+ },
754
+ {
755
+ "epoch": 0.2420091324200913,
756
+ "grad_norm": 49352.40625,
757
+ "learning_rate": 0.0001798283576604356,
758
+ "loss": 1.0416,
759
+ "step": 106
760
+ },
761
+ {
762
+ "epoch": 0.24429223744292236,
763
+ "grad_norm": 60283.234375,
764
+ "learning_rate": 0.0001793734460665523,
765
+ "loss": 1.3194,
766
+ "step": 107
767
+ },
768
+ {
769
+ "epoch": 0.2465753424657534,
770
+ "grad_norm": 46655.90234375,
771
+ "learning_rate": 0.00017891405093963938,
772
+ "loss": 0.8581,
773
+ "step": 108
774
+ },
775
+ {
776
+ "epoch": 0.24885844748858446,
777
+ "grad_norm": 65966.4609375,
778
+ "learning_rate": 0.0001784501982293479,
779
+ "loss": 0.9858,
780
+ "step": 109
781
+ },
782
+ {
783
+ "epoch": 0.2511415525114155,
784
+ "grad_norm": 72021.5703125,
785
+ "learning_rate": 0.00017798191413712243,
786
+ "loss": 1.0411,
787
+ "step": 110
788
+ },
789
+ {
790
+ "epoch": 0.2511415525114155,
791
+ "eval_loss": 1.1379607915878296,
792
+ "eval_runtime": 6.0407,
793
+ "eval_samples_per_second": 16.554,
794
+ "eval_steps_per_second": 8.277,
795
+ "step": 110
796
+ },
797
+ {
798
+ "epoch": 0.2534246575342466,
799
+ "grad_norm": 60144.625,
800
+ "learning_rate": 0.0001775092251147211,
801
+ "loss": 1.1175,
802
+ "step": 111
803
+ },
804
+ {
805
+ "epoch": 0.2557077625570776,
806
+ "grad_norm": 55304.96484375,
807
+ "learning_rate": 0.0001770321578627213,
808
+ "loss": 0.7432,
809
+ "step": 112
810
+ },
811
+ {
812
+ "epoch": 0.2579908675799087,
813
+ "grad_norm": 50044.91015625,
814
+ "learning_rate": 0.00017655073932901168,
815
+ "loss": 0.8335,
816
+ "step": 113
817
+ },
818
+ {
819
+ "epoch": 0.2602739726027397,
820
+ "grad_norm": 63828.10546875,
821
+ "learning_rate": 0.0001760649967072697,
822
+ "loss": 1.1305,
823
+ "step": 114
824
+ },
825
+ {
826
+ "epoch": 0.2625570776255708,
827
+ "grad_norm": 56665.87890625,
828
+ "learning_rate": 0.00017557495743542585,
829
+ "loss": 1.1719,
830
+ "step": 115
831
+ },
832
+ {
833
+ "epoch": 0.2648401826484018,
834
+ "grad_norm": 63755.87109375,
835
+ "learning_rate": 0.00017508064919411344,
836
+ "loss": 1.0547,
837
+ "step": 116
838
+ },
839
+ {
840
+ "epoch": 0.2671232876712329,
841
+ "grad_norm": 55144.74609375,
842
+ "learning_rate": 0.00017458209990510527,
843
+ "loss": 0.8684,
844
+ "step": 117
845
+ },
846
+ {
847
+ "epoch": 0.2694063926940639,
848
+ "grad_norm": 68380.3125,
849
+ "learning_rate": 0.00017407933772973637,
850
+ "loss": 1.1672,
851
+ "step": 118
852
+ },
853
+ {
854
+ "epoch": 0.271689497716895,
855
+ "grad_norm": 67942.5,
856
+ "learning_rate": 0.00017357239106731317,
857
+ "loss": 1.3715,
858
+ "step": 119
859
+ },
860
+ {
861
+ "epoch": 0.273972602739726,
862
+ "grad_norm": 50505.95703125,
863
+ "learning_rate": 0.00017306128855350942,
864
+ "loss": 0.9512,
865
+ "step": 120
866
+ },
867
+ {
868
+ "epoch": 0.2762557077625571,
869
+ "grad_norm": 56973.859375,
870
+ "learning_rate": 0.0001725460590587486,
871
+ "loss": 0.9059,
872
+ "step": 121
873
+ },
874
+ {
875
+ "epoch": 0.2785388127853881,
876
+ "grad_norm": 47352.71484375,
877
+ "learning_rate": 0.00017202673168657318,
878
+ "loss": 0.9492,
879
+ "step": 122
880
+ },
881
+ {
882
+ "epoch": 0.2808219178082192,
883
+ "grad_norm": 60938.4921875,
884
+ "learning_rate": 0.0001715033357720006,
885
+ "loss": 1.1549,
886
+ "step": 123
887
+ },
888
+ {
889
+ "epoch": 0.2831050228310502,
890
+ "grad_norm": 50557.2265625,
891
+ "learning_rate": 0.00017097590087986633,
892
+ "loss": 1.1143,
893
+ "step": 124
894
+ },
895
+ {
896
+ "epoch": 0.2853881278538813,
897
+ "grad_norm": 61211.25390625,
898
+ "learning_rate": 0.00017044445680315372,
899
+ "loss": 1.1829,
900
+ "step": 125
901
+ },
902
+ {
903
+ "epoch": 0.2876712328767123,
904
+ "grad_norm": 58093.75,
905
+ "learning_rate": 0.00016990903356131124,
906
+ "loss": 1.1329,
907
+ "step": 126
908
+ },
909
+ {
910
+ "epoch": 0.2899543378995434,
911
+ "grad_norm": 62758.921875,
912
+ "learning_rate": 0.00016936966139855663,
913
+ "loss": 0.8082,
914
+ "step": 127
915
+ },
916
+ {
917
+ "epoch": 0.2922374429223744,
918
+ "grad_norm": 55444.03515625,
919
+ "learning_rate": 0.00016882637078216868,
920
+ "loss": 1.0763,
921
+ "step": 128
922
+ },
923
+ {
924
+ "epoch": 0.2945205479452055,
925
+ "grad_norm": 45902.125,
926
+ "learning_rate": 0.0001682791924007661,
927
+ "loss": 0.5558,
928
+ "step": 129
929
+ },
930
+ {
931
+ "epoch": 0.2968036529680365,
932
+ "grad_norm": 67659.0546875,
933
+ "learning_rate": 0.00016772815716257412,
934
+ "loss": 0.8364,
935
+ "step": 130
936
+ },
937
+ {
938
+ "epoch": 0.2990867579908676,
939
+ "grad_norm": 58708.359375,
940
+ "learning_rate": 0.0001671732961936785,
941
+ "loss": 1.1031,
942
+ "step": 131
943
+ },
944
+ {
945
+ "epoch": 0.3013698630136986,
946
+ "grad_norm": 44826.8671875,
947
+ "learning_rate": 0.00016661464083626734,
948
+ "loss": 0.6981,
949
+ "step": 132
950
+ },
951
+ {
952
+ "epoch": 0.3036529680365297,
953
+ "grad_norm": 49845.3125,
954
+ "learning_rate": 0.00016605222264686086,
955
+ "loss": 0.932,
956
+ "step": 133
957
+ },
958
+ {
959
+ "epoch": 0.3059360730593607,
960
+ "grad_norm": 68260.5703125,
961
+ "learning_rate": 0.00016548607339452853,
962
+ "loss": 1.1092,
963
+ "step": 134
964
+ },
965
+ {
966
+ "epoch": 0.3082191780821918,
967
+ "grad_norm": 51835.33203125,
968
+ "learning_rate": 0.00016491622505909482,
969
+ "loss": 1.175,
970
+ "step": 135
971
+ },
972
+ {
973
+ "epoch": 0.3105022831050228,
974
+ "grad_norm": 47168.08203125,
975
+ "learning_rate": 0.00016434270982933273,
976
+ "loss": 1.1088,
977
+ "step": 136
978
+ },
979
+ {
980
+ "epoch": 0.3127853881278539,
981
+ "grad_norm": 65501.48828125,
982
+ "learning_rate": 0.0001637655601011454,
983
+ "loss": 1.0026,
984
+ "step": 137
985
+ },
986
+ {
987
+ "epoch": 0.3150684931506849,
988
+ "grad_norm": 58316.41015625,
989
+ "learning_rate": 0.00016318480847573642,
990
+ "loss": 1.073,
991
+ "step": 138
992
+ },
993
+ {
994
+ "epoch": 0.317351598173516,
995
+ "grad_norm": 47404.8125,
996
+ "learning_rate": 0.00016260048775776804,
997
+ "loss": 0.9182,
998
+ "step": 139
999
+ },
1000
+ {
1001
+ "epoch": 0.319634703196347,
1002
+ "grad_norm": 67868.96875,
1003
+ "learning_rate": 0.00016201263095350833,
1004
+ "loss": 1.2406,
1005
+ "step": 140
1006
+ },
1007
+ {
1008
+ "epoch": 0.3219178082191781,
1009
+ "grad_norm": 62392.62109375,
1010
+ "learning_rate": 0.0001614212712689668,
1011
+ "loss": 0.6007,
1012
+ "step": 141
1013
+ },
1014
+ {
1015
+ "epoch": 0.3242009132420091,
1016
+ "grad_norm": 44980.81640625,
1017
+ "learning_rate": 0.00016082644210801844,
1018
+ "loss": 0.9601,
1019
+ "step": 142
1020
+ },
1021
+ {
1022
+ "epoch": 0.3264840182648402,
1023
+ "grad_norm": 53141.99609375,
1024
+ "learning_rate": 0.00016022817707051724,
1025
+ "loss": 0.7017,
1026
+ "step": 143
1027
+ },
1028
+ {
1029
+ "epoch": 0.3287671232876712,
1030
+ "grad_norm": 54526.64453125,
1031
+ "learning_rate": 0.00015962650995039783,
1032
+ "loss": 1.1726,
1033
+ "step": 144
1034
+ },
1035
+ {
1036
+ "epoch": 0.3310502283105023,
1037
+ "grad_norm": 60199.36328125,
1038
+ "learning_rate": 0.00015902147473376694,
1039
+ "loss": 1.0703,
1040
+ "step": 145
1041
+ },
1042
+ {
1043
+ "epoch": 0.3333333333333333,
1044
+ "grad_norm": 62003.74609375,
1045
+ "learning_rate": 0.00015841310559698343,
1046
+ "loss": 1.384,
1047
+ "step": 146
1048
+ },
1049
+ {
1050
+ "epoch": 0.3356164383561644,
1051
+ "grad_norm": 74924.28125,
1052
+ "learning_rate": 0.0001578014369047279,
1053
+ "loss": 1.4119,
1054
+ "step": 147
1055
+ },
1056
+ {
1057
+ "epoch": 0.3378995433789954,
1058
+ "grad_norm": 53506.546875,
1059
+ "learning_rate": 0.00015718650320806142,
1060
+ "loss": 1.1047,
1061
+ "step": 148
1062
+ },
1063
+ {
1064
+ "epoch": 0.3401826484018265,
1065
+ "grad_norm": 95636.2578125,
1066
+ "learning_rate": 0.00015656833924247398,
1067
+ "loss": 1.2457,
1068
+ "step": 149
1069
+ },
1070
+ {
1071
+ "epoch": 0.3424657534246575,
1072
+ "grad_norm": 84311.3671875,
1073
+ "learning_rate": 0.00015594697992592232,
1074
+ "loss": 1.8571,
1075
+ "step": 150
1076
+ },
1077
+ {
1078
+ "epoch": 0.3447488584474886,
1079
+ "grad_norm": 49623.90625,
1080
+ "learning_rate": 0.00015532246035685756,
1081
+ "loss": 0.8779,
1082
+ "step": 151
1083
+ },
1084
+ {
1085
+ "epoch": 0.3470319634703196,
1086
+ "grad_norm": 75495.09375,
1087
+ "learning_rate": 0.00015469481581224272,
1088
+ "loss": 1.1597,
1089
+ "step": 152
1090
+ },
1091
+ {
1092
+ "epoch": 0.3493150684931507,
1093
+ "grad_norm": 61961.66796875,
1094
+ "learning_rate": 0.00015406408174555976,
1095
+ "loss": 1.1428,
1096
+ "step": 153
1097
+ },
1098
+ {
1099
+ "epoch": 0.3515981735159817,
1100
+ "grad_norm": 63387.83203125,
1101
+ "learning_rate": 0.0001534302937848073,
1102
+ "loss": 1.2391,
1103
+ "step": 154
1104
+ },
1105
+ {
1106
+ "epoch": 0.3538812785388128,
1107
+ "grad_norm": 59753.67578125,
1108
+ "learning_rate": 0.00015279348773048786,
1109
+ "loss": 1.6021,
1110
+ "step": 155
1111
+ },
1112
+ {
1113
+ "epoch": 0.3561643835616438,
1114
+ "grad_norm": 61863.0078125,
1115
+ "learning_rate": 0.00015215369955358566,
1116
+ "loss": 1.241,
1117
+ "step": 156
1118
+ },
1119
+ {
1120
+ "epoch": 0.3584474885844749,
1121
+ "grad_norm": 57081.796875,
1122
+ "learning_rate": 0.0001515109653935348,
1123
+ "loss": 1.1971,
1124
+ "step": 157
1125
+ },
1126
+ {
1127
+ "epoch": 0.3607305936073059,
1128
+ "grad_norm": 71862.75,
1129
+ "learning_rate": 0.00015086532155617784,
1130
+ "loss": 1.1196,
1131
+ "step": 158
1132
+ },
1133
+ {
1134
+ "epoch": 0.363013698630137,
1135
+ "grad_norm": 53156.65234375,
1136
+ "learning_rate": 0.00015021680451171498,
1137
+ "loss": 1.1128,
1138
+ "step": 159
1139
+ },
1140
+ {
1141
+ "epoch": 0.365296803652968,
1142
+ "grad_norm": 65439.45703125,
1143
+ "learning_rate": 0.00014956545089264407,
1144
+ "loss": 1.1221,
1145
+ "step": 160
1146
+ },
1147
+ {
1148
+ "epoch": 0.3675799086757991,
1149
+ "grad_norm": 61590.7734375,
1150
+ "learning_rate": 0.0001489112974916912,
1151
+ "loss": 1.1785,
1152
+ "step": 161
1153
+ },
1154
+ {
1155
+ "epoch": 0.3698630136986301,
1156
+ "grad_norm": 44939.07421875,
1157
+ "learning_rate": 0.00014825438125973264,
1158
+ "loss": 1.1794,
1159
+ "step": 162
1160
+ },
1161
+ {
1162
+ "epoch": 0.3721461187214612,
1163
+ "grad_norm": 52118.67578125,
1164
+ "learning_rate": 0.00014759473930370736,
1165
+ "loss": 0.9673,
1166
+ "step": 163
1167
+ },
1168
+ {
1169
+ "epoch": 0.3744292237442922,
1170
+ "grad_norm": 56795.2734375,
1171
+ "learning_rate": 0.0001469324088845212,
1172
+ "loss": 1.0183,
1173
+ "step": 164
1174
+ },
1175
+ {
1176
+ "epoch": 0.3767123287671233,
1177
+ "grad_norm": 54740.015625,
1178
+ "learning_rate": 0.00014626742741494206,
1179
+ "loss": 1.2797,
1180
+ "step": 165
1181
+ },
1182
+ {
1183
+ "epoch": 0.3789954337899543,
1184
+ "grad_norm": 54072.75,
1185
+ "learning_rate": 0.00014559983245748638,
1186
+ "loss": 1.1121,
1187
+ "step": 166
1188
+ },
1189
+ {
1190
+ "epoch": 0.3812785388127854,
1191
+ "grad_norm": 63319.68359375,
1192
+ "learning_rate": 0.00014492966172229777,
1193
+ "loss": 1.1224,
1194
+ "step": 167
1195
+ },
1196
+ {
1197
+ "epoch": 0.3835616438356164,
1198
+ "grad_norm": 70814.8828125,
1199
+ "learning_rate": 0.00014425695306501658,
1200
+ "loss": 1.5157,
1201
+ "step": 168
1202
+ },
1203
+ {
1204
+ "epoch": 0.3858447488584475,
1205
+ "grad_norm": 57420.37890625,
1206
+ "learning_rate": 0.00014358174448464154,
1207
+ "loss": 1.213,
1208
+ "step": 169
1209
+ },
1210
+ {
1211
+ "epoch": 0.3881278538812785,
1212
+ "grad_norm": 53243.4375,
1213
+ "learning_rate": 0.00014290407412138366,
1214
+ "loss": 1.3539,
1215
+ "step": 170
1216
+ },
1217
+ {
1218
+ "epoch": 0.3904109589041096,
1219
+ "grad_norm": 55964.484375,
1220
+ "learning_rate": 0.00014222398025451135,
1221
+ "loss": 1.0541,
1222
+ "step": 171
1223
+ },
1224
+ {
1225
+ "epoch": 0.3926940639269406,
1226
+ "grad_norm": 60372.61328125,
1227
+ "learning_rate": 0.00014154150130018866,
1228
+ "loss": 0.9876,
1229
+ "step": 172
1230
+ },
1231
+ {
1232
+ "epoch": 0.3949771689497717,
1233
+ "grad_norm": 78811.0546875,
1234
+ "learning_rate": 0.0001408566758093048,
1235
+ "loss": 1.1479,
1236
+ "step": 173
1237
+ },
1238
+ {
1239
+ "epoch": 0.3972602739726027,
1240
+ "grad_norm": 68956.203125,
1241
+ "learning_rate": 0.00014016954246529696,
1242
+ "loss": 1.3174,
1243
+ "step": 174
1244
+ },
1245
+ {
1246
+ "epoch": 0.3995433789954338,
1247
+ "grad_norm": 54806.1328125,
1248
+ "learning_rate": 0.00013948014008196487,
1249
+ "loss": 1.1019,
1250
+ "step": 175
1251
+ },
1252
+ {
1253
+ "epoch": 0.4018264840182648,
1254
+ "grad_norm": 62154.96875,
1255
+ "learning_rate": 0.0001387885076012785,
1256
+ "loss": 1.2393,
1257
+ "step": 176
1258
+ },
1259
+ {
1260
+ "epoch": 0.4041095890410959,
1261
+ "grad_norm": 53926.55078125,
1262
+ "learning_rate": 0.00013809468409117846,
1263
+ "loss": 1.1682,
1264
+ "step": 177
1265
+ },
1266
+ {
1267
+ "epoch": 0.4063926940639269,
1268
+ "grad_norm": 55390.63671875,
1269
+ "learning_rate": 0.00013739870874336898,
1270
+ "loss": 1.0232,
1271
+ "step": 178
1272
+ },
1273
+ {
1274
+ "epoch": 0.408675799086758,
1275
+ "grad_norm": 60079.51953125,
1276
+ "learning_rate": 0.00013670062087110422,
1277
+ "loss": 1.2492,
1278
+ "step": 179
1279
+ },
1280
+ {
1281
+ "epoch": 0.410958904109589,
1282
+ "grad_norm": 68658.15625,
1283
+ "learning_rate": 0.00013600045990696762,
1284
+ "loss": 1.432,
1285
+ "step": 180
1286
+ },
1287
+ {
1288
+ "epoch": 0.4132420091324201,
1289
+ "grad_norm": 79412.75,
1290
+ "learning_rate": 0.0001352982654006444,
1291
+ "loss": 1.3679,
1292
+ "step": 181
1293
+ },
1294
+ {
1295
+ "epoch": 0.4155251141552511,
1296
+ "grad_norm": 76066.828125,
1297
+ "learning_rate": 0.00013459407701668763,
1298
+ "loss": 1.3032,
1299
+ "step": 182
1300
+ },
1301
+ {
1302
+ "epoch": 0.4178082191780822,
1303
+ "grad_norm": 74717.1328125,
1304
+ "learning_rate": 0.00013388793453227767,
1305
+ "loss": 1.1265,
1306
+ "step": 183
1307
+ },
1308
+ {
1309
+ "epoch": 0.4200913242009132,
1310
+ "grad_norm": 83329.0859375,
1311
+ "learning_rate": 0.0001331798778349752,
1312
+ "loss": 1.5738,
1313
+ "step": 184
1314
+ },
1315
+ {
1316
+ "epoch": 0.4223744292237443,
1317
+ "grad_norm": 56585.3203125,
1318
+ "learning_rate": 0.00013246994692046836,
1319
+ "loss": 1.1222,
1320
+ "step": 185
1321
+ },
1322
+ {
1323
+ "epoch": 0.4246575342465753,
1324
+ "grad_norm": 66546.9140625,
1325
+ "learning_rate": 0.00013175818189031327,
1326
+ "loss": 1.1622,
1327
+ "step": 186
1328
+ },
1329
+ {
1330
+ "epoch": 0.4269406392694064,
1331
+ "grad_norm": 89381.2734375,
1332
+ "learning_rate": 0.00013104462294966896,
1333
+ "loss": 1.0249,
1334
+ "step": 187
1335
+ },
1336
+ {
1337
+ "epoch": 0.4292237442922374,
1338
+ "grad_norm": 61278.7734375,
1339
+ "learning_rate": 0.00013032931040502627,
1340
+ "loss": 0.9255,
1341
+ "step": 188
1342
+ },
1343
+ {
1344
+ "epoch": 0.4315068493150685,
1345
+ "grad_norm": 73469.1484375,
1346
+ "learning_rate": 0.00012961228466193116,
1347
+ "loss": 1.2164,
1348
+ "step": 189
1349
+ },
1350
+ {
1351
+ "epoch": 0.4337899543378995,
1352
+ "grad_norm": 56180.0546875,
1353
+ "learning_rate": 0.00012889358622270223,
1354
+ "loss": 0.8783,
1355
+ "step": 190
1356
+ },
1357
+ {
1358
+ "epoch": 0.4360730593607306,
1359
+ "grad_norm": 55383.29296875,
1360
+ "learning_rate": 0.00012817325568414297,
1361
+ "loss": 1.0513,
1362
+ "step": 191
1363
+ },
1364
+ {
1365
+ "epoch": 0.4383561643835616,
1366
+ "grad_norm": 67318.65625,
1367
+ "learning_rate": 0.00012745133373524853,
1368
+ "loss": 1.2075,
1369
+ "step": 192
1370
+ },
1371
+ {
1372
+ "epoch": 0.4406392694063927,
1373
+ "grad_norm": 41490.49609375,
1374
+ "learning_rate": 0.0001267278611549073,
1375
+ "loss": 0.4758,
1376
+ "step": 193
1377
+ },
1378
+ {
1379
+ "epoch": 0.4429223744292237,
1380
+ "grad_norm": 56853.50390625,
1381
+ "learning_rate": 0.00012600287880959763,
1382
+ "loss": 1.2679,
1383
+ "step": 194
1384
+ },
1385
+ {
1386
+ "epoch": 0.4452054794520548,
1387
+ "grad_norm": 34868.66796875,
1388
+ "learning_rate": 0.0001252764276510792,
1389
+ "loss": 0.5454,
1390
+ "step": 195
1391
+ },
1392
+ {
1393
+ "epoch": 0.4474885844748858,
1394
+ "grad_norm": 53731.953125,
1395
+ "learning_rate": 0.00012454854871407994,
1396
+ "loss": 1.2682,
1397
+ "step": 196
1398
+ },
1399
+ {
1400
+ "epoch": 0.4497716894977169,
1401
+ "grad_norm": 57530.828125,
1402
+ "learning_rate": 0.00012381928311397806,
1403
+ "loss": 1.1872,
1404
+ "step": 197
1405
+ },
1406
+ {
1407
+ "epoch": 0.4520547945205479,
1408
+ "grad_norm": 59639.96875,
1409
+ "learning_rate": 0.0001230886720444796,
1410
+ "loss": 1.2913,
1411
+ "step": 198
1412
+ },
1413
+ {
1414
+ "epoch": 0.454337899543379,
1415
+ "grad_norm": 76171.09375,
1416
+ "learning_rate": 0.00012235675677529158,
1417
+ "loss": 1.314,
1418
+ "step": 199
1419
+ },
1420
+ {
1421
+ "epoch": 0.45662100456621,
1422
+ "grad_norm": 73176.8984375,
1423
+ "learning_rate": 0.00012162357864979072,
1424
+ "loss": 0.9921,
1425
+ "step": 200
1426
+ },
1427
+ {
1428
+ "epoch": 0.4589041095890411,
1429
+ "grad_norm": 44535.90625,
1430
+ "learning_rate": 0.00012088917908268821,
1431
+ "loss": 0.9701,
1432
+ "step": 201
1433
+ },
1434
+ {
1435
+ "epoch": 0.4611872146118721,
1436
+ "grad_norm": 60352.7734375,
1437
+ "learning_rate": 0.00012015359955769021,
1438
+ "loss": 1.0526,
1439
+ "step": 202
1440
+ },
1441
+ {
1442
+ "epoch": 0.4634703196347032,
1443
+ "grad_norm": 60158.63671875,
1444
+ "learning_rate": 0.00011941688162515467,
1445
+ "loss": 0.9069,
1446
+ "step": 203
1447
+ },
1448
+ {
1449
+ "epoch": 0.4657534246575342,
1450
+ "grad_norm": 56008.59375,
1451
+ "learning_rate": 0.00011867906689974428,
1452
+ "loss": 1.1581,
1453
+ "step": 204
1454
+ },
1455
+ {
1456
+ "epoch": 0.4680365296803653,
1457
+ "grad_norm": 60848.1328125,
1458
+ "learning_rate": 0.00011794019705807584,
1459
+ "loss": 1.4732,
1460
+ "step": 205
1461
+ },
1462
+ {
1463
+ "epoch": 0.4703196347031963,
1464
+ "grad_norm": 73643.15625,
1465
+ "learning_rate": 0.00011720031383636585,
1466
+ "loss": 1.4934,
1467
+ "step": 206
1468
+ },
1469
+ {
1470
+ "epoch": 0.4726027397260274,
1471
+ "grad_norm": 60684.5625,
1472
+ "learning_rate": 0.00011645945902807341,
1473
+ "loss": 0.9241,
1474
+ "step": 207
1475
+ },
1476
+ {
1477
+ "epoch": 0.4748858447488584,
1478
+ "grad_norm": 56336.984375,
1479
+ "learning_rate": 0.00011571767448153901,
1480
+ "loss": 0.7535,
1481
+ "step": 208
1482
+ },
1483
+ {
1484
+ "epoch": 0.4771689497716895,
1485
+ "grad_norm": 57490.16015625,
1486
+ "learning_rate": 0.00011497500209762102,
1487
+ "loss": 1.0042,
1488
+ "step": 209
1489
+ },
1490
+ {
1491
+ "epoch": 0.4794520547945205,
1492
+ "grad_norm": 58042.80859375,
1493
+ "learning_rate": 0.00011423148382732853,
1494
+ "loss": 1.0596,
1495
+ "step": 210
1496
+ },
1497
+ {
1498
+ "epoch": 0.4817351598173516,
1499
+ "grad_norm": 72169.4375,
1500
+ "learning_rate": 0.00011348716166945195,
1501
+ "loss": 1.259,
1502
+ "step": 211
1503
+ },
1504
+ {
1505
+ "epoch": 0.4840182648401826,
1506
+ "grad_norm": 60079.5859375,
1507
+ "learning_rate": 0.0001127420776681905,
1508
+ "loss": 1.189,
1509
+ "step": 212
1510
+ },
1511
+ {
1512
+ "epoch": 0.4863013698630137,
1513
+ "grad_norm": 67140.265625,
1514
+ "learning_rate": 0.00011199627391077732,
1515
+ "loss": 1.4022,
1516
+ "step": 213
1517
+ },
1518
+ {
1519
+ "epoch": 0.4885844748858447,
1520
+ "grad_norm": 52046.58984375,
1521
+ "learning_rate": 0.00011124979252510208,
1522
+ "loss": 1.3088,
1523
+ "step": 214
1524
+ },
1525
+ {
1526
+ "epoch": 0.4908675799086758,
1527
+ "grad_norm": 61540.34375,
1528
+ "learning_rate": 0.0001105026756773314,
1529
+ "loss": 1.1,
1530
+ "step": 215
1531
+ },
1532
+ {
1533
+ "epoch": 0.4931506849315068,
1534
+ "grad_norm": 54976.57421875,
1535
+ "learning_rate": 0.00010975496556952682,
1536
+ "loss": 0.9573,
1537
+ "step": 216
1538
+ },
1539
+ {
1540
+ "epoch": 0.4954337899543379,
1541
+ "grad_norm": 47444.56640625,
1542
+ "learning_rate": 0.00010900670443726135,
1543
+ "loss": 0.7793,
1544
+ "step": 217
1545
+ },
1546
+ {
1547
+ "epoch": 0.4977168949771689,
1548
+ "grad_norm": 114338.7734375,
1549
+ "learning_rate": 0.00010825793454723325,
1550
+ "loss": 1.4479,
1551
+ "step": 218
1552
+ },
1553
+ {
1554
+ "epoch": 0.5,
1555
+ "grad_norm": 55353.40625,
1556
+ "learning_rate": 0.00010750869819487883,
1557
+ "loss": 1.1219,
1558
+ "step": 219
1559
+ },
1560
+ {
1561
+ "epoch": 0.502283105022831,
1562
+ "grad_norm": 56450.48046875,
1563
+ "learning_rate": 0.00010675903770198333,
1564
+ "loss": 1.143,
1565
+ "step": 220
1566
+ },
1567
+ {
1568
+ "epoch": 0.502283105022831,
1569
+ "eval_loss": 1.120017647743225,
1570
+ "eval_runtime": 6.072,
1571
+ "eval_samples_per_second": 16.469,
1572
+ "eval_steps_per_second": 8.235,
1573
+ "step": 220
1574
+ },
1575
+ {
1576
+ "epoch": 0.5045662100456622,
1577
+ "grad_norm": 60988.2421875,
1578
+ "learning_rate": 0.00010600899541429004,
1579
+ "loss": 1.1982,
1580
+ "step": 221
1581
+ },
1582
+ {
1583
+ "epoch": 0.5068493150684932,
1584
+ "grad_norm": 66918.2578125,
1585
+ "learning_rate": 0.00010525861369910877,
1586
+ "loss": 1.2181,
1587
+ "step": 222
1588
+ },
1589
+ {
1590
+ "epoch": 0.5091324200913242,
1591
+ "grad_norm": 79952.6640625,
1592
+ "learning_rate": 0.00010450793494292224,
1593
+ "loss": 1.1986,
1594
+ "step": 223
1595
+ },
1596
+ {
1597
+ "epoch": 0.5114155251141552,
1598
+ "grad_norm": 66536.671875,
1599
+ "learning_rate": 0.00010375700154899208,
1600
+ "loss": 1.5032,
1601
+ "step": 224
1602
+ },
1603
+ {
1604
+ "epoch": 0.5136986301369864,
1605
+ "grad_norm": 71625.8984375,
1606
+ "learning_rate": 0.00010300585593496348,
1607
+ "loss": 1.3998,
1608
+ "step": 225
1609
+ },
1610
+ {
1611
+ "epoch": 0.5159817351598174,
1612
+ "grad_norm": 66669.5703125,
1613
+ "learning_rate": 0.00010225454053046921,
1614
+ "loss": 0.8312,
1615
+ "step": 226
1616
+ },
1617
+ {
1618
+ "epoch": 0.5182648401826484,
1619
+ "grad_norm": 39306.3359375,
1620
+ "learning_rate": 0.00010150309777473306,
1621
+ "loss": 0.6047,
1622
+ "step": 227
1623
+ },
1624
+ {
1625
+ "epoch": 0.5205479452054794,
1626
+ "grad_norm": 53523.4609375,
1627
+ "learning_rate": 0.0001007515701141722,
1628
+ "loss": 1.1629,
1629
+ "step": 228
1630
+ },
1631
+ {
1632
+ "epoch": 0.5228310502283106,
1633
+ "grad_norm": 74079.578125,
1634
+ "learning_rate": 0.0001,
1635
+ "loss": 1.0456,
1636
+ "step": 229
1637
+ },
1638
+ {
1639
+ "epoch": 0.5251141552511416,
1640
+ "grad_norm": 62511.48046875,
1641
+ "learning_rate": 9.924842988582782e-05,
1642
+ "loss": 1.0753,
1643
+ "step": 230
1644
+ },
1645
+ {
1646
+ "epoch": 0.5273972602739726,
1647
+ "grad_norm": 53424.6328125,
1648
+ "learning_rate": 9.849690222526698e-05,
1649
+ "loss": 1.0496,
1650
+ "step": 231
1651
+ },
1652
+ {
1653
+ "epoch": 0.5296803652968036,
1654
+ "grad_norm": 76170.8671875,
1655
+ "learning_rate": 9.77454594695308e-05,
1656
+ "loss": 1.291,
1657
+ "step": 232
1658
+ },
1659
+ {
1660
+ "epoch": 0.5319634703196348,
1661
+ "grad_norm": 54918.74609375,
1662
+ "learning_rate": 9.699414406503654e-05,
1663
+ "loss": 1.0644,
1664
+ "step": 233
1665
+ },
1666
+ {
1667
+ "epoch": 0.5342465753424658,
1668
+ "grad_norm": 90057.484375,
1669
+ "learning_rate": 9.624299845100795e-05,
1670
+ "loss": 1.6448,
1671
+ "step": 234
1672
+ },
1673
+ {
1674
+ "epoch": 0.5365296803652968,
1675
+ "grad_norm": 50608.3125,
1676
+ "learning_rate": 9.549206505707777e-05,
1677
+ "loss": 1.0954,
1678
+ "step": 235
1679
+ },
1680
+ {
1681
+ "epoch": 0.5388127853881278,
1682
+ "grad_norm": 75716.2109375,
1683
+ "learning_rate": 9.474138630089124e-05,
1684
+ "loss": 1.2342,
1685
+ "step": 236
1686
+ },
1687
+ {
1688
+ "epoch": 0.541095890410959,
1689
+ "grad_norm": 43616.71875,
1690
+ "learning_rate": 9.399100458570997e-05,
1691
+ "loss": 0.8373,
1692
+ "step": 237
1693
+ },
1694
+ {
1695
+ "epoch": 0.54337899543379,
1696
+ "grad_norm": 51082.984375,
1697
+ "learning_rate": 9.324096229801674e-05,
1698
+ "loss": 1.0579,
1699
+ "step": 238
1700
+ },
1701
+ {
1702
+ "epoch": 0.545662100456621,
1703
+ "grad_norm": 64326.8359375,
1704
+ "learning_rate": 9.249130180512118e-05,
1705
+ "loss": 1.1863,
1706
+ "step": 239
1707
+ },
1708
+ {
1709
+ "epoch": 0.547945205479452,
1710
+ "grad_norm": 63323.40234375,
1711
+ "learning_rate": 9.174206545276677e-05,
1712
+ "loss": 1.1812,
1713
+ "step": 240
1714
+ },
1715
+ {
1716
+ "epoch": 0.5502283105022832,
1717
+ "grad_norm": 57428.61328125,
1718
+ "learning_rate": 9.099329556273866e-05,
1719
+ "loss": 1.0751,
1720
+ "step": 241
1721
+ },
1722
+ {
1723
+ "epoch": 0.5525114155251142,
1724
+ "grad_norm": 78367.109375,
1725
+ "learning_rate": 9.024503443047319e-05,
1726
+ "loss": 1.0871,
1727
+ "step": 242
1728
+ },
1729
+ {
1730
+ "epoch": 0.5547945205479452,
1731
+ "grad_norm": 56003.375,
1732
+ "learning_rate": 8.949732432266866e-05,
1733
+ "loss": 1.2125,
1734
+ "step": 243
1735
+ },
1736
+ {
1737
+ "epoch": 0.5570776255707762,
1738
+ "grad_norm": 50056.81640625,
1739
+ "learning_rate": 8.875020747489794e-05,
1740
+ "loss": 1.1738,
1741
+ "step": 244
1742
+ },
1743
+ {
1744
+ "epoch": 0.5593607305936074,
1745
+ "grad_norm": 54685.7265625,
1746
+ "learning_rate": 8.800372608922271e-05,
1747
+ "loss": 1.2343,
1748
+ "step": 245
1749
+ },
1750
+ {
1751
+ "epoch": 0.5616438356164384,
1752
+ "grad_norm": 75873.0,
1753
+ "learning_rate": 8.72579223318095e-05,
1754
+ "loss": 1.4155,
1755
+ "step": 246
1756
+ },
1757
+ {
1758
+ "epoch": 0.5639269406392694,
1759
+ "grad_norm": 73813.1171875,
1760
+ "learning_rate": 8.651283833054809e-05,
1761
+ "loss": 1.3641,
1762
+ "step": 247
1763
+ },
1764
+ {
1765
+ "epoch": 0.5662100456621004,
1766
+ "grad_norm": 52799.9140625,
1767
+ "learning_rate": 8.57685161726715e-05,
1768
+ "loss": 1.0217,
1769
+ "step": 248
1770
+ },
1771
+ {
1772
+ "epoch": 0.5684931506849316,
1773
+ "grad_norm": 53224.50390625,
1774
+ "learning_rate": 8.5024997902379e-05,
1775
+ "loss": 1.1589,
1776
+ "step": 249
1777
+ },
1778
+ {
1779
+ "epoch": 0.5707762557077626,
1780
+ "grad_norm": 101657.953125,
1781
+ "learning_rate": 8.428232551846101e-05,
1782
+ "loss": 1.5682,
1783
+ "step": 250
1784
+ },
1785
+ {
1786
+ "epoch": 0.5730593607305936,
1787
+ "grad_norm": 61659.9453125,
1788
+ "learning_rate": 8.35405409719266e-05,
1789
+ "loss": 1.0378,
1790
+ "step": 251
1791
+ },
1792
+ {
1793
+ "epoch": 0.5753424657534246,
1794
+ "grad_norm": 56497.0859375,
1795
+ "learning_rate": 8.279968616363418e-05,
1796
+ "loss": 1.2885,
1797
+ "step": 252
1798
+ },
1799
+ {
1800
+ "epoch": 0.5776255707762558,
1801
+ "grad_norm": 64090.8359375,
1802
+ "learning_rate": 8.205980294192421e-05,
1803
+ "loss": 1.3739,
1804
+ "step": 253
1805
+ },
1806
+ {
1807
+ "epoch": 0.5799086757990868,
1808
+ "grad_norm": 56467.49609375,
1809
+ "learning_rate": 8.132093310025571e-05,
1810
+ "loss": 1.0083,
1811
+ "step": 254
1812
+ },
1813
+ {
1814
+ "epoch": 0.5821917808219178,
1815
+ "grad_norm": 66332.6328125,
1816
+ "learning_rate": 8.058311837484535e-05,
1817
+ "loss": 1.2237,
1818
+ "step": 255
1819
+ },
1820
+ {
1821
+ "epoch": 0.5844748858447488,
1822
+ "grad_norm": 68149.140625,
1823
+ "learning_rate": 7.984640044230983e-05,
1824
+ "loss": 0.8762,
1825
+ "step": 256
1826
+ },
1827
+ {
1828
+ "epoch": 0.58675799086758,
1829
+ "grad_norm": 56869.44140625,
1830
+ "learning_rate": 7.911082091731181e-05,
1831
+ "loss": 1.4265,
1832
+ "step": 257
1833
+ },
1834
+ {
1835
+ "epoch": 0.589041095890411,
1836
+ "grad_norm": 66520.6640625,
1837
+ "learning_rate": 7.837642135020929e-05,
1838
+ "loss": 1.3615,
1839
+ "step": 258
1840
+ },
1841
+ {
1842
+ "epoch": 0.591324200913242,
1843
+ "grad_norm": 50649.890625,
1844
+ "learning_rate": 7.764324322470841e-05,
1845
+ "loss": 1.1493,
1846
+ "step": 259
1847
+ },
1848
+ {
1849
+ "epoch": 0.593607305936073,
1850
+ "grad_norm": 55484.89453125,
1851
+ "learning_rate": 7.691132795552043e-05,
1852
+ "loss": 1.125,
1853
+ "step": 260
1854
+ },
1855
+ {
1856
+ "epoch": 0.5958904109589042,
1857
+ "grad_norm": 69880.4765625,
1858
+ "learning_rate": 7.618071688602199e-05,
1859
+ "loss": 1.0974,
1860
+ "step": 261
1861
+ },
1862
+ {
1863
+ "epoch": 0.5981735159817352,
1864
+ "grad_norm": 69534.0234375,
1865
+ "learning_rate": 7.54514512859201e-05,
1866
+ "loss": 1.4658,
1867
+ "step": 262
1868
+ },
1869
+ {
1870
+ "epoch": 0.6004566210045662,
1871
+ "grad_norm": 63003.56640625,
1872
+ "learning_rate": 7.472357234892082e-05,
1873
+ "loss": 1.3326,
1874
+ "step": 263
1875
+ },
1876
+ {
1877
+ "epoch": 0.6027397260273972,
1878
+ "grad_norm": 48729.6796875,
1879
+ "learning_rate": 7.399712119040238e-05,
1880
+ "loss": 0.9631,
1881
+ "step": 264
1882
+ },
1883
+ {
1884
+ "epoch": 0.6050228310502284,
1885
+ "grad_norm": 53327.13671875,
1886
+ "learning_rate": 7.327213884509272e-05,
1887
+ "loss": 0.8053,
1888
+ "step": 265
1889
+ },
1890
+ {
1891
+ "epoch": 0.6073059360730594,
1892
+ "grad_norm": 56525.19921875,
1893
+ "learning_rate": 7.254866626475152e-05,
1894
+ "loss": 0.9795,
1895
+ "step": 266
1896
+ },
1897
+ {
1898
+ "epoch": 0.6095890410958904,
1899
+ "grad_norm": 54734.92578125,
1900
+ "learning_rate": 7.182674431585704e-05,
1901
+ "loss": 1.0737,
1902
+ "step": 267
1903
+ },
1904
+ {
1905
+ "epoch": 0.6118721461187214,
1906
+ "grad_norm": 62297.49609375,
1907
+ "learning_rate": 7.110641377729778e-05,
1908
+ "loss": 1.3582,
1909
+ "step": 268
1910
+ },
1911
+ {
1912
+ "epoch": 0.6141552511415526,
1913
+ "grad_norm": 69470.6484375,
1914
+ "learning_rate": 7.038771533806884e-05,
1915
+ "loss": 1.261,
1916
+ "step": 269
1917
+ },
1918
+ {
1919
+ "epoch": 0.6164383561643836,
1920
+ "grad_norm": 75963.8046875,
1921
+ "learning_rate": 6.967068959497376e-05,
1922
+ "loss": 1.2154,
1923
+ "step": 270
1924
+ },
1925
+ {
1926
+ "epoch": 0.6187214611872146,
1927
+ "grad_norm": 66193.421875,
1928
+ "learning_rate": 6.895537705033108e-05,
1929
+ "loss": 1.2711,
1930
+ "step": 271
1931
+ },
1932
+ {
1933
+ "epoch": 0.6210045662100456,
1934
+ "grad_norm": 53985.34765625,
1935
+ "learning_rate": 6.824181810968675e-05,
1936
+ "loss": 0.9666,
1937
+ "step": 272
1938
+ },
1939
+ {
1940
+ "epoch": 0.6232876712328768,
1941
+ "grad_norm": 54472.45703125,
1942
+ "learning_rate": 6.753005307953167e-05,
1943
+ "loss": 0.7533,
1944
+ "step": 273
1945
+ },
1946
+ {
1947
+ "epoch": 0.6255707762557078,
1948
+ "grad_norm": 56472.04296875,
1949
+ "learning_rate": 6.682012216502484e-05,
1950
+ "loss": 1.1015,
1951
+ "step": 274
1952
+ },
1953
+ {
1954
+ "epoch": 0.6278538812785388,
1955
+ "grad_norm": 59576.98046875,
1956
+ "learning_rate": 6.611206546772237e-05,
1957
+ "loss": 1.243,
1958
+ "step": 275
1959
+ },
1960
+ {
1961
+ "epoch": 0.6301369863013698,
1962
+ "grad_norm": 46372.140625,
1963
+ "learning_rate": 6.54059229833124e-05,
1964
+ "loss": 0.919,
1965
+ "step": 276
1966
+ },
1967
+ {
1968
+ "epoch": 0.632420091324201,
1969
+ "grad_norm": 67258.171875,
1970
+ "learning_rate": 6.47017345993556e-05,
1971
+ "loss": 1.2923,
1972
+ "step": 277
1973
+ },
1974
+ {
1975
+ "epoch": 0.634703196347032,
1976
+ "grad_norm": 97251.375,
1977
+ "learning_rate": 6.39995400930324e-05,
1978
+ "loss": 1.3966,
1979
+ "step": 278
1980
+ },
1981
+ {
1982
+ "epoch": 0.636986301369863,
1983
+ "grad_norm": 55997.97265625,
1984
+ "learning_rate": 6.329937912889582e-05,
1985
+ "loss": 0.7084,
1986
+ "step": 279
1987
+ },
1988
+ {
1989
+ "epoch": 0.639269406392694,
1990
+ "grad_norm": 52094.359375,
1991
+ "learning_rate": 6.260129125663106e-05,
1992
+ "loss": 0.9639,
1993
+ "step": 280
1994
+ },
1995
+ {
1996
+ "epoch": 0.6415525114155252,
1997
+ "grad_norm": 59095.65625,
1998
+ "learning_rate": 6.190531590882159e-05,
1999
+ "loss": 1.2343,
2000
+ "step": 281
2001
+ },
2002
+ {
2003
+ "epoch": 0.6438356164383562,
2004
+ "grad_norm": 50110.4375,
2005
+ "learning_rate": 6.121149239872151e-05,
2006
+ "loss": 1.1458,
2007
+ "step": 282
2008
+ },
2009
+ {
2010
+ "epoch": 0.6461187214611872,
2011
+ "grad_norm": 61831.3359375,
2012
+ "learning_rate": 6.051985991803517e-05,
2013
+ "loss": 1.1047,
2014
+ "step": 283
2015
+ },
2016
+ {
2017
+ "epoch": 0.6484018264840182,
2018
+ "grad_norm": 68382.28125,
2019
+ "learning_rate": 5.983045753470308e-05,
2020
+ "loss": 1.0395,
2021
+ "step": 284
2022
+ },
2023
+ {
2024
+ "epoch": 0.6506849315068494,
2025
+ "grad_norm": 52874.1796875,
2026
+ "learning_rate": 5.9143324190695196e-05,
2027
+ "loss": 1.1445,
2028
+ "step": 285
2029
+ },
2030
+ {
2031
+ "epoch": 0.6529680365296804,
2032
+ "grad_norm": 53148.92578125,
2033
+ "learning_rate": 5.845849869981137e-05,
2034
+ "loss": 1.1164,
2035
+ "step": 286
2036
+ },
2037
+ {
2038
+ "epoch": 0.6552511415525114,
2039
+ "grad_norm": 49849.4765625,
2040
+ "learning_rate": 5.777601974548866e-05,
2041
+ "loss": 0.8629,
2042
+ "step": 287
2043
+ },
2044
+ {
2045
+ "epoch": 0.6575342465753424,
2046
+ "grad_norm": 55761.171875,
2047
+ "learning_rate": 5.709592587861637e-05,
2048
+ "loss": 1.0324,
2049
+ "step": 288
2050
+ },
2051
+ {
2052
+ "epoch": 0.6598173515981736,
2053
+ "grad_norm": 47947.84765625,
2054
+ "learning_rate": 5.6418255515358486e-05,
2055
+ "loss": 0.9341,
2056
+ "step": 289
2057
+ },
2058
+ {
2059
+ "epoch": 0.6621004566210046,
2060
+ "grad_norm": 69258.109375,
2061
+ "learning_rate": 5.574304693498346e-05,
2062
+ "loss": 1.1231,
2063
+ "step": 290
2064
+ },
2065
+ {
2066
+ "epoch": 0.6643835616438356,
2067
+ "grad_norm": 48883.62109375,
2068
+ "learning_rate": 5.507033827770225e-05,
2069
+ "loss": 1.0446,
2070
+ "step": 291
2071
+ },
2072
+ {
2073
+ "epoch": 0.6666666666666666,
2074
+ "grad_norm": 48850.08203125,
2075
+ "learning_rate": 5.4400167542513636e-05,
2076
+ "loss": 1.0385,
2077
+ "step": 292
2078
+ },
2079
+ {
2080
+ "epoch": 0.6689497716894978,
2081
+ "grad_norm": 58763.58984375,
2082
+ "learning_rate": 5.3732572585057974e-05,
2083
+ "loss": 1.3623,
2084
+ "step": 293
2085
+ },
2086
+ {
2087
+ "epoch": 0.6712328767123288,
2088
+ "grad_norm": 53041.3046875,
2089
+ "learning_rate": 5.306759111547881e-05,
2090
+ "loss": 1.1103,
2091
+ "step": 294
2092
+ },
2093
+ {
2094
+ "epoch": 0.6735159817351598,
2095
+ "grad_norm": 54536.75390625,
2096
+ "learning_rate": 5.240526069629265e-05,
2097
+ "loss": 1.359,
2098
+ "step": 295
2099
+ },
2100
+ {
2101
+ "epoch": 0.6757990867579908,
2102
+ "grad_norm": 55320.109375,
2103
+ "learning_rate": 5.174561874026741e-05,
2104
+ "loss": 1.1521,
2105
+ "step": 296
2106
+ },
2107
+ {
2108
+ "epoch": 0.678082191780822,
2109
+ "grad_norm": 56988.97265625,
2110
+ "learning_rate": 5.108870250830882e-05,
2111
+ "loss": 1.2104,
2112
+ "step": 297
2113
+ },
2114
+ {
2115
+ "epoch": 0.680365296803653,
2116
+ "grad_norm": 65441.95703125,
2117
+ "learning_rate": 5.0434549107355944e-05,
2118
+ "loss": 1.0381,
2119
+ "step": 298
2120
+ },
2121
+ {
2122
+ "epoch": 0.682648401826484,
2123
+ "grad_norm": 54248.94921875,
2124
+ "learning_rate": 4.978319548828504e-05,
2125
+ "loss": 1.0877,
2126
+ "step": 299
2127
+ },
2128
+ {
2129
+ "epoch": 0.684931506849315,
2130
+ "grad_norm": 62817.375,
2131
+ "learning_rate": 4.9134678443822166e-05,
2132
+ "loss": 1.1492,
2133
+ "step": 300
2134
+ },
2135
+ {
2136
+ "epoch": 0.6872146118721462,
2137
+ "grad_norm": 55446.65234375,
2138
+ "learning_rate": 4.8489034606465225e-05,
2139
+ "loss": 1.078,
2140
+ "step": 301
2141
+ },
2142
+ {
2143
+ "epoch": 0.6894977168949772,
2144
+ "grad_norm": 61516.6484375,
2145
+ "learning_rate": 4.784630044641435e-05,
2146
+ "loss": 1.0592,
2147
+ "step": 302
2148
+ },
2149
+ {
2150
+ "epoch": 0.6917808219178082,
2151
+ "grad_norm": 53431.09375,
2152
+ "learning_rate": 4.7206512269512124e-05,
2153
+ "loss": 1.2311,
2154
+ "step": 303
2155
+ },
2156
+ {
2157
+ "epoch": 0.6940639269406392,
2158
+ "grad_norm": 68350.4609375,
2159
+ "learning_rate": 4.65697062151927e-05,
2160
+ "loss": 0.9918,
2161
+ "step": 304
2162
+ },
2163
+ {
2164
+ "epoch": 0.6963470319634704,
2165
+ "grad_norm": 53722.33984375,
2166
+ "learning_rate": 4.593591825444028e-05,
2167
+ "loss": 1.0563,
2168
+ "step": 305
2169
+ },
2170
+ {
2171
+ "epoch": 0.6986301369863014,
2172
+ "grad_norm": 73647.765625,
2173
+ "learning_rate": 4.530518418775733e-05,
2174
+ "loss": 1.1067,
2175
+ "step": 306
2176
+ },
2177
+ {
2178
+ "epoch": 0.7009132420091324,
2179
+ "grad_norm": 60410.765625,
2180
+ "learning_rate": 4.4677539643142454e-05,
2181
+ "loss": 1.2064,
2182
+ "step": 307
2183
+ },
2184
+ {
2185
+ "epoch": 0.7031963470319634,
2186
+ "grad_norm": 74342.1328125,
2187
+ "learning_rate": 4.40530200740777e-05,
2188
+ "loss": 1.2019,
2189
+ "step": 308
2190
+ },
2191
+ {
2192
+ "epoch": 0.7054794520547946,
2193
+ "grad_norm": 58122.41796875,
2194
+ "learning_rate": 4.343166075752605e-05,
2195
+ "loss": 1.0697,
2196
+ "step": 309
2197
+ },
2198
+ {
2199
+ "epoch": 0.7077625570776256,
2200
+ "grad_norm": 68661.671875,
2201
+ "learning_rate": 4.281349679193861e-05,
2202
+ "loss": 1.444,
2203
+ "step": 310
2204
+ },
2205
+ {
2206
+ "epoch": 0.7100456621004566,
2207
+ "grad_norm": 60156.05859375,
2208
+ "learning_rate": 4.2198563095272116e-05,
2209
+ "loss": 1.0489,
2210
+ "step": 311
2211
+ },
2212
+ {
2213
+ "epoch": 0.7123287671232876,
2214
+ "grad_norm": 72793.8359375,
2215
+ "learning_rate": 4.158689440301657e-05,
2216
+ "loss": 1.225,
2217
+ "step": 312
2218
+ },
2219
+ {
2220
+ "epoch": 0.7146118721461188,
2221
+ "grad_norm": 61000.60546875,
2222
+ "learning_rate": 4.097852526623307e-05,
2223
+ "loss": 1.1954,
2224
+ "step": 313
2225
+ },
2226
+ {
2227
+ "epoch": 0.7168949771689498,
2228
+ "grad_norm": 60486.58203125,
2229
+ "learning_rate": 4.0373490049602204e-05,
2230
+ "loss": 1.0551,
2231
+ "step": 314
2232
+ },
2233
+ {
2234
+ "epoch": 0.7191780821917808,
2235
+ "grad_norm": 91112.53125,
2236
+ "learning_rate": 3.977182292948283e-05,
2237
+ "loss": 1.2928,
2238
+ "step": 315
2239
+ },
2240
+ {
2241
+ "epoch": 0.7214611872146118,
2242
+ "grad_norm": 60165.15234375,
2243
+ "learning_rate": 3.9173557891981573e-05,
2244
+ "loss": 0.9197,
2245
+ "step": 316
2246
+ },
2247
+ {
2248
+ "epoch": 0.723744292237443,
2249
+ "grad_norm": 63943.1875,
2250
+ "learning_rate": 3.857872873103322e-05,
2251
+ "loss": 1.0148,
2252
+ "step": 317
2253
+ },
2254
+ {
2255
+ "epoch": 0.726027397260274,
2256
+ "grad_norm": 66577.5234375,
2257
+ "learning_rate": 3.7987369046491684e-05,
2258
+ "loss": 1.1527,
2259
+ "step": 318
2260
+ },
2261
+ {
2262
+ "epoch": 0.728310502283105,
2263
+ "grad_norm": 46133.828125,
2264
+ "learning_rate": 3.7399512242231995e-05,
2265
+ "loss": 0.6874,
2266
+ "step": 319
2267
+ },
2268
+ {
2269
+ "epoch": 0.730593607305936,
2270
+ "grad_norm": 54615.4296875,
2271
+ "learning_rate": 3.6815191524263624e-05,
2272
+ "loss": 1.2463,
2273
+ "step": 320
2274
+ },
2275
+ {
2276
+ "epoch": 0.7328767123287672,
2277
+ "grad_norm": 60014.6015625,
2278
+ "learning_rate": 3.623443989885462e-05,
2279
+ "loss": 1.3196,
2280
+ "step": 321
2281
+ },
2282
+ {
2283
+ "epoch": 0.7351598173515982,
2284
+ "grad_norm": 53860.69140625,
2285
+ "learning_rate": 3.565729017066729e-05,
2286
+ "loss": 1.0468,
2287
+ "step": 322
2288
+ },
2289
+ {
2290
+ "epoch": 0.7374429223744292,
2291
+ "grad_norm": 55772.921875,
2292
+ "learning_rate": 3.508377494090521e-05,
2293
+ "loss": 1.0851,
2294
+ "step": 323
2295
+ },
2296
+ {
2297
+ "epoch": 0.7397260273972602,
2298
+ "grad_norm": 53785.3203125,
2299
+ "learning_rate": 3.45139266054715e-05,
2300
+ "loss": 1.2084,
2301
+ "step": 324
2302
+ },
2303
+ {
2304
+ "epoch": 0.7420091324200914,
2305
+ "grad_norm": 57143.98046875,
2306
+ "learning_rate": 3.394777735313919e-05,
2307
+ "loss": 1.031,
2308
+ "step": 325
2309
+ },
2310
+ {
2311
+ "epoch": 0.7442922374429224,
2312
+ "grad_norm": 60805.40625,
2313
+ "learning_rate": 3.338535916373266e-05,
2314
+ "loss": 1.2843,
2315
+ "step": 326
2316
+ },
2317
+ {
2318
+ "epoch": 0.7465753424657534,
2319
+ "grad_norm": 56165.6875,
2320
+ "learning_rate": 3.2826703806321525e-05,
2321
+ "loss": 0.9815,
2322
+ "step": 327
2323
+ },
2324
+ {
2325
+ "epoch": 0.7488584474885844,
2326
+ "grad_norm": 65290.7421875,
2327
+ "learning_rate": 3.227184283742591e-05,
2328
+ "loss": 1.2205,
2329
+ "step": 328
2330
+ },
2331
+ {
2332
+ "epoch": 0.7511415525114156,
2333
+ "grad_norm": 59236.3828125,
2334
+ "learning_rate": 3.17208075992339e-05,
2335
+ "loss": 1.0087,
2336
+ "step": 329
2337
+ },
2338
+ {
2339
+ "epoch": 0.7534246575342466,
2340
+ "grad_norm": 68966.4921875,
2341
+ "learning_rate": 3.117362921783134e-05,
2342
+ "loss": 1.2744,
2343
+ "step": 330
2344
+ },
2345
+ {
2346
+ "epoch": 0.7534246575342466,
2347
+ "eval_loss": 1.1098405122756958,
2348
+ "eval_runtime": 5.9855,
2349
+ "eval_samples_per_second": 16.707,
2350
+ "eval_steps_per_second": 8.354,
2351
+ "step": 330
2352
+ },
2353
+ {
2354
+ "epoch": 0.7557077625570776,
2355
+ "grad_norm": 57247.7265625,
2356
+ "learning_rate": 3.063033860144339e-05,
2357
+ "loss": 1.3947,
2358
+ "step": 331
2359
+ },
2360
+ {
2361
+ "epoch": 0.7579908675799086,
2362
+ "grad_norm": 60022.890625,
2363
+ "learning_rate": 3.0090966438688772e-05,
2364
+ "loss": 1.4142,
2365
+ "step": 332
2366
+ },
2367
+ {
2368
+ "epoch": 0.7602739726027398,
2369
+ "grad_norm": 48251.8359375,
2370
+ "learning_rate": 2.9555543196846292e-05,
2371
+ "loss": 0.9917,
2372
+ "step": 333
2373
+ },
2374
+ {
2375
+ "epoch": 0.7625570776255708,
2376
+ "grad_norm": 50926.33203125,
2377
+ "learning_rate": 2.9024099120133673e-05,
2378
+ "loss": 1.0497,
2379
+ "step": 334
2380
+ },
2381
+ {
2382
+ "epoch": 0.7648401826484018,
2383
+ "grad_norm": 51535.24609375,
2384
+ "learning_rate": 2.8496664227999415e-05,
2385
+ "loss": 1.0956,
2386
+ "step": 335
2387
+ },
2388
+ {
2389
+ "epoch": 0.7671232876712328,
2390
+ "grad_norm": 47902.59375,
2391
+ "learning_rate": 2.7973268313426837e-05,
2392
+ "loss": 1.182,
2393
+ "step": 336
2394
+ },
2395
+ {
2396
+ "epoch": 0.769406392694064,
2397
+ "grad_norm": 57668.80859375,
2398
+ "learning_rate": 2.745394094125141e-05,
2399
+ "loss": 1.1184,
2400
+ "step": 337
2401
+ },
2402
+ {
2403
+ "epoch": 0.771689497716895,
2404
+ "grad_norm": 65797.921875,
2405
+ "learning_rate": 2.6938711446490606e-05,
2406
+ "loss": 1.524,
2407
+ "step": 338
2408
+ },
2409
+ {
2410
+ "epoch": 0.773972602739726,
2411
+ "grad_norm": 44397.484375,
2412
+ "learning_rate": 2.6427608932686843e-05,
2413
+ "loss": 1.0515,
2414
+ "step": 339
2415
+ },
2416
+ {
2417
+ "epoch": 0.776255707762557,
2418
+ "grad_norm": 88163.90625,
2419
+ "learning_rate": 2.5920662270263653e-05,
2420
+ "loss": 1.291,
2421
+ "step": 340
2422
+ },
2423
+ {
2424
+ "epoch": 0.7785388127853882,
2425
+ "grad_norm": 91966.3046875,
2426
+ "learning_rate": 2.5417900094894744e-05,
2427
+ "loss": 1.5279,
2428
+ "step": 341
2429
+ },
2430
+ {
2431
+ "epoch": 0.7808219178082192,
2432
+ "grad_norm": 54689.46875,
2433
+ "learning_rate": 2.4919350805886577e-05,
2434
+ "loss": 1.0487,
2435
+ "step": 342
2436
+ },
2437
+ {
2438
+ "epoch": 0.7831050228310502,
2439
+ "grad_norm": 60095.0703125,
2440
+ "learning_rate": 2.4425042564574184e-05,
2441
+ "loss": 1.3099,
2442
+ "step": 343
2443
+ },
2444
+ {
2445
+ "epoch": 0.7853881278538812,
2446
+ "grad_norm": 54892.97265625,
2447
+ "learning_rate": 2.3935003292730296e-05,
2448
+ "loss": 1.1576,
2449
+ "step": 344
2450
+ },
2451
+ {
2452
+ "epoch": 0.7876712328767124,
2453
+ "grad_norm": 99210.53125,
2454
+ "learning_rate": 2.344926067098836e-05,
2455
+ "loss": 1.846,
2456
+ "step": 345
2457
+ },
2458
+ {
2459
+ "epoch": 0.7899543378995434,
2460
+ "grad_norm": 65337.87890625,
2461
+ "learning_rate": 2.2967842137278706e-05,
2462
+ "loss": 1.1789,
2463
+ "step": 346
2464
+ },
2465
+ {
2466
+ "epoch": 0.7922374429223744,
2467
+ "grad_norm": 71690.0625,
2468
+ "learning_rate": 2.2490774885278908e-05,
2469
+ "loss": 1.3072,
2470
+ "step": 347
2471
+ },
2472
+ {
2473
+ "epoch": 0.7945205479452054,
2474
+ "grad_norm": 58516.3828125,
2475
+ "learning_rate": 2.201808586287757e-05,
2476
+ "loss": 1.0443,
2477
+ "step": 348
2478
+ },
2479
+ {
2480
+ "epoch": 0.7968036529680366,
2481
+ "grad_norm": 72086.8828125,
2482
+ "learning_rate": 2.15498017706521e-05,
2483
+ "loss": 1.1584,
2484
+ "step": 349
2485
+ },
2486
+ {
2487
+ "epoch": 0.7990867579908676,
2488
+ "grad_norm": 58905.08984375,
2489
+ "learning_rate": 2.1085949060360654e-05,
2490
+ "loss": 1.2844,
2491
+ "step": 350
2492
+ },
2493
+ {
2494
+ "epoch": 0.8013698630136986,
2495
+ "grad_norm": 56423.5703125,
2496
+ "learning_rate": 2.0626553933447734e-05,
2497
+ "loss": 1.1755,
2498
+ "step": 351
2499
+ },
2500
+ {
2501
+ "epoch": 0.8036529680365296,
2502
+ "grad_norm": 55340.45703125,
2503
+ "learning_rate": 2.01716423395644e-05,
2504
+ "loss": 1.08,
2505
+ "step": 352
2506
+ },
2507
+ {
2508
+ "epoch": 0.8059360730593608,
2509
+ "grad_norm": 56130.5859375,
2510
+ "learning_rate": 1.9721239975102313e-05,
2511
+ "loss": 1.1129,
2512
+ "step": 353
2513
+ },
2514
+ {
2515
+ "epoch": 0.8082191780821918,
2516
+ "grad_norm": 58951.8046875,
2517
+ "learning_rate": 1.9275372281742242e-05,
2518
+ "loss": 0.9749,
2519
+ "step": 354
2520
+ },
2521
+ {
2522
+ "epoch": 0.8105022831050228,
2523
+ "grad_norm": 58070.171875,
2524
+ "learning_rate": 1.8834064445016953e-05,
2525
+ "loss": 1.3564,
2526
+ "step": 355
2527
+ },
2528
+ {
2529
+ "epoch": 0.8127853881278538,
2530
+ "grad_norm": 56061.4921875,
2531
+ "learning_rate": 1.839734139288868e-05,
2532
+ "loss": 1.2425,
2533
+ "step": 356
2534
+ },
2535
+ {
2536
+ "epoch": 0.815068493150685,
2537
+ "grad_norm": 56343.59375,
2538
+ "learning_rate": 1.7965227794340877e-05,
2539
+ "loss": 0.9662,
2540
+ "step": 357
2541
+ },
2542
+ {
2543
+ "epoch": 0.817351598173516,
2544
+ "grad_norm": 61289.234375,
2545
+ "learning_rate": 1.753774805798486e-05,
2546
+ "loss": 1.1741,
2547
+ "step": 358
2548
+ },
2549
+ {
2550
+ "epoch": 0.819634703196347,
2551
+ "grad_norm": 98183.703125,
2552
+ "learning_rate": 1.7114926330680957e-05,
2553
+ "loss": 0.9826,
2554
+ "step": 359
2555
+ },
2556
+ {
2557
+ "epoch": 0.821917808219178,
2558
+ "grad_norm": 55233.2890625,
2559
+ "learning_rate": 1.6696786496174578e-05,
2560
+ "loss": 1.0732,
2561
+ "step": 360
2562
+ },
2563
+ {
2564
+ "epoch": 0.8242009132420092,
2565
+ "grad_norm": 45176.41015625,
2566
+ "learning_rate": 1.6283352173747145e-05,
2567
+ "loss": 1.0048,
2568
+ "step": 361
2569
+ },
2570
+ {
2571
+ "epoch": 0.8264840182648402,
2572
+ "grad_norm": 66357.2109375,
2573
+ "learning_rate": 1.587464671688187e-05,
2574
+ "loss": 1.1619,
2575
+ "step": 362
2576
+ },
2577
+ {
2578
+ "epoch": 0.8287671232876712,
2579
+ "grad_norm": 57376.734375,
2580
+ "learning_rate": 1.5470693211944643e-05,
2581
+ "loss": 1.1175,
2582
+ "step": 363
2583
+ },
2584
+ {
2585
+ "epoch": 0.8310502283105022,
2586
+ "grad_norm": 54212.546875,
2587
+ "learning_rate": 1.5071514476879878e-05,
2588
+ "loss": 0.9726,
2589
+ "step": 364
2590
+ },
2591
+ {
2592
+ "epoch": 0.8333333333333334,
2593
+ "grad_norm": 65802.8515625,
2594
+ "learning_rate": 1.4677133059921632e-05,
2595
+ "loss": 1.3092,
2596
+ "step": 365
2597
+ },
2598
+ {
2599
+ "epoch": 0.8356164383561644,
2600
+ "grad_norm": 49888.99609375,
2601
+ "learning_rate": 1.4287571238320053e-05,
2602
+ "loss": 0.5516,
2603
+ "step": 366
2604
+ },
2605
+ {
2606
+ "epoch": 0.8378995433789954,
2607
+ "grad_norm": 66406.1875,
2608
+ "learning_rate": 1.3902851017082864e-05,
2609
+ "loss": 0.9952,
2610
+ "step": 367
2611
+ },
2612
+ {
2613
+ "epoch": 0.8401826484018264,
2614
+ "grad_norm": 57395.77734375,
2615
+ "learning_rate": 1.3522994127732414e-05,
2616
+ "loss": 1.1279,
2617
+ "step": 368
2618
+ },
2619
+ {
2620
+ "epoch": 0.8424657534246576,
2621
+ "grad_norm": 79530.40625,
2622
+ "learning_rate": 1.3148022027078222e-05,
2623
+ "loss": 1.8326,
2624
+ "step": 369
2625
+ },
2626
+ {
2627
+ "epoch": 0.8447488584474886,
2628
+ "grad_norm": 63298.96875,
2629
+ "learning_rate": 1.2777955896004812e-05,
2630
+ "loss": 1.0041,
2631
+ "step": 370
2632
+ },
2633
+ {
2634
+ "epoch": 0.8470319634703196,
2635
+ "grad_norm": 74275.5234375,
2636
+ "learning_rate": 1.2412816638275404e-05,
2637
+ "loss": 1.3752,
2638
+ "step": 371
2639
+ },
2640
+ {
2641
+ "epoch": 0.8493150684931506,
2642
+ "grad_norm": 57181.0390625,
2643
+ "learning_rate": 1.2052624879351104e-05,
2644
+ "loss": 1.3001,
2645
+ "step": 372
2646
+ },
2647
+ {
2648
+ "epoch": 0.8515981735159818,
2649
+ "grad_norm": 56421.484375,
2650
+ "learning_rate": 1.1697400965225747e-05,
2651
+ "loss": 1.1496,
2652
+ "step": 373
2653
+ },
2654
+ {
2655
+ "epoch": 0.8538812785388128,
2656
+ "grad_norm": 55708.265625,
2657
+ "learning_rate": 1.134716496127679e-05,
2658
+ "loss": 1.0953,
2659
+ "step": 374
2660
+ },
2661
+ {
2662
+ "epoch": 0.8561643835616438,
2663
+ "grad_norm": 64520.75,
2664
+ "learning_rate": 1.1001936651131717e-05,
2665
+ "loss": 0.9648,
2666
+ "step": 375
2667
+ },
2668
+ {
2669
+ "epoch": 0.8584474885844748,
2670
+ "grad_norm": 67208.2578125,
2671
+ "learning_rate": 1.0661735535550666e-05,
2672
+ "loss": 1.465,
2673
+ "step": 376
2674
+ },
2675
+ {
2676
+ "epoch": 0.860730593607306,
2677
+ "grad_norm": 69055.765625,
2678
+ "learning_rate": 1.0326580831324817e-05,
2679
+ "loss": 1.1437,
2680
+ "step": 377
2681
+ },
2682
+ {
2683
+ "epoch": 0.863013698630137,
2684
+ "grad_norm": 56862.21875,
2685
+ "learning_rate": 9.996491470190917e-06,
2686
+ "loss": 1.1829,
2687
+ "step": 378
2688
+ },
2689
+ {
2690
+ "epoch": 0.865296803652968,
2691
+ "grad_norm": 43474.109375,
2692
+ "learning_rate": 9.671486097761917e-06,
2693
+ "loss": 0.7937,
2694
+ "step": 379
2695
+ },
2696
+ {
2697
+ "epoch": 0.867579908675799,
2698
+ "grad_norm": 58035.9140625,
2699
+ "learning_rate": 9.351583072473713e-06,
2700
+ "loss": 1.2742,
2701
+ "step": 380
2702
+ },
2703
+ {
2704
+ "epoch": 0.8698630136986302,
2705
+ "grad_norm": 59717.421875,
2706
+ "learning_rate": 9.036800464548157e-06,
2707
+ "loss": 1.245,
2708
+ "step": 381
2709
+ },
2710
+ {
2711
+ "epoch": 0.8721461187214612,
2712
+ "grad_norm": 100361.84375,
2713
+ "learning_rate": 8.727156054972374e-06,
2714
+ "loss": 0.9628,
2715
+ "step": 382
2716
+ },
2717
+ {
2718
+ "epoch": 0.8744292237442922,
2719
+ "grad_norm": 46780.90234375,
2720
+ "learning_rate": 8.422667334494249e-06,
2721
+ "loss": 0.7594,
2722
+ "step": 383
2723
+ },
2724
+ {
2725
+ "epoch": 0.8767123287671232,
2726
+ "grad_norm": 58426.484375,
2727
+ "learning_rate": 8.123351502634625e-06,
2728
+ "loss": 1.3035,
2729
+ "step": 384
2730
+ },
2731
+ {
2732
+ "epoch": 0.8789954337899544,
2733
+ "grad_norm": 55850.3046875,
2734
+ "learning_rate": 7.82922546671555e-06,
2735
+ "loss": 1.3021,
2736
+ "step": 385
2737
+ },
2738
+ {
2739
+ "epoch": 0.8812785388127854,
2740
+ "grad_norm": 65162.09375,
2741
+ "learning_rate": 7.54030584090537e-06,
2742
+ "loss": 1.214,
2743
+ "step": 386
2744
+ },
2745
+ {
2746
+ "epoch": 0.8835616438356164,
2747
+ "grad_norm": 50473.97265625,
2748
+ "learning_rate": 7.256608945280319e-06,
2749
+ "loss": 1.1281,
2750
+ "step": 387
2751
+ },
2752
+ {
2753
+ "epoch": 0.8858447488584474,
2754
+ "grad_norm": 50409.5390625,
2755
+ "learning_rate": 6.97815080490245e-06,
2756
+ "loss": 1.2092,
2757
+ "step": 388
2758
+ },
2759
+ {
2760
+ "epoch": 0.8881278538812786,
2761
+ "grad_norm": 56921.63671875,
2762
+ "learning_rate": 6.704947148914609e-06,
2763
+ "loss": 1.0073,
2764
+ "step": 389
2765
+ },
2766
+ {
2767
+ "epoch": 0.8904109589041096,
2768
+ "grad_norm": 72200.1875,
2769
+ "learning_rate": 6.437013409651849e-06,
2770
+ "loss": 1.0158,
2771
+ "step": 390
2772
+ },
2773
+ {
2774
+ "epoch": 0.8926940639269406,
2775
+ "grad_norm": 48175.20703125,
2776
+ "learning_rate": 6.174364721769743e-06,
2777
+ "loss": 0.939,
2778
+ "step": 391
2779
+ },
2780
+ {
2781
+ "epoch": 0.8949771689497716,
2782
+ "grad_norm": 53166.9296875,
2783
+ "learning_rate": 5.917015921389568e-06,
2784
+ "loss": 1.0468,
2785
+ "step": 392
2786
+ },
2787
+ {
2788
+ "epoch": 0.8972602739726028,
2789
+ "grad_norm": 58858.93359375,
2790
+ "learning_rate": 5.664981545260073e-06,
2791
+ "loss": 1.0791,
2792
+ "step": 393
2793
+ },
2794
+ {
2795
+ "epoch": 0.8995433789954338,
2796
+ "grad_norm": 49101.5078125,
2797
+ "learning_rate": 5.418275829936537e-06,
2798
+ "loss": 1.1875,
2799
+ "step": 394
2800
+ },
2801
+ {
2802
+ "epoch": 0.9018264840182648,
2803
+ "grad_norm": 65572.046875,
2804
+ "learning_rate": 5.176912710976467e-06,
2805
+ "loss": 1.326,
2806
+ "step": 395
2807
+ },
2808
+ {
2809
+ "epoch": 0.9041095890410958,
2810
+ "grad_norm": 61459.63671875,
2811
+ "learning_rate": 4.940905822152453e-06,
2812
+ "loss": 1.0855,
2813
+ "step": 396
2814
+ },
2815
+ {
2816
+ "epoch": 0.906392694063927,
2817
+ "grad_norm": 64900.1796875,
2818
+ "learning_rate": 4.710268494682146e-06,
2819
+ "loss": 1.3393,
2820
+ "step": 397
2821
+ },
2822
+ {
2823
+ "epoch": 0.908675799086758,
2824
+ "grad_norm": 52331.64453125,
2825
+ "learning_rate": 4.485013756475076e-06,
2826
+ "loss": 1.0434,
2827
+ "step": 398
2828
+ },
2829
+ {
2830
+ "epoch": 0.910958904109589,
2831
+ "grad_norm": 71229.609375,
2832
+ "learning_rate": 4.2651543313968145e-06,
2833
+ "loss": 1.2118,
2834
+ "step": 399
2835
+ },
2836
+ {
2837
+ "epoch": 0.91324200913242,
2838
+ "grad_norm": 63443.84375,
2839
+ "learning_rate": 4.050702638550275e-06,
2840
+ "loss": 1.1458,
2841
+ "step": 400
2842
+ },
2843
+ {
2844
+ "epoch": 0.9155251141552512,
2845
+ "grad_norm": 64409.73828125,
2846
+ "learning_rate": 3.841670791574137e-06,
2847
+ "loss": 1.1705,
2848
+ "step": 401
2849
+ },
2850
+ {
2851
+ "epoch": 0.9178082191780822,
2852
+ "grad_norm": 81092.15625,
2853
+ "learning_rate": 3.638070597958665e-06,
2854
+ "loss": 1.2991,
2855
+ "step": 402
2856
+ },
2857
+ {
2858
+ "epoch": 0.9200913242009132,
2859
+ "grad_norm": 41609.2109375,
2860
+ "learning_rate": 3.4399135583787043e-06,
2861
+ "loss": 0.4622,
2862
+ "step": 403
2863
+ },
2864
+ {
2865
+ "epoch": 0.9223744292237442,
2866
+ "grad_norm": 58560.71875,
2867
+ "learning_rate": 3.2472108660439706e-06,
2868
+ "loss": 0.8759,
2869
+ "step": 404
2870
+ },
2871
+ {
2872
+ "epoch": 0.9246575342465754,
2873
+ "grad_norm": 65780.9609375,
2874
+ "learning_rate": 3.059973406066963e-06,
2875
+ "loss": 1.3583,
2876
+ "step": 405
2877
+ },
2878
+ {
2879
+ "epoch": 0.9269406392694064,
2880
+ "grad_norm": 53281.921875,
2881
+ "learning_rate": 2.878211754847926e-06,
2882
+ "loss": 0.971,
2883
+ "step": 406
2884
+ },
2885
+ {
2886
+ "epoch": 0.9292237442922374,
2887
+ "grad_norm": 62492.41015625,
2888
+ "learning_rate": 2.7019361794775156e-06,
2889
+ "loss": 1.1152,
2890
+ "step": 407
2891
+ },
2892
+ {
2893
+ "epoch": 0.9315068493150684,
2894
+ "grad_norm": 57761.875,
2895
+ "learning_rate": 2.5311566371568507e-06,
2896
+ "loss": 1.1735,
2897
+ "step": 408
2898
+ },
2899
+ {
2900
+ "epoch": 0.9337899543378996,
2901
+ "grad_norm": 53913.1640625,
2902
+ "learning_rate": 2.365882774634998e-06,
2903
+ "loss": 0.9759,
2904
+ "step": 409
2905
+ },
2906
+ {
2907
+ "epoch": 0.9360730593607306,
2908
+ "grad_norm": 57200.4375,
2909
+ "learning_rate": 2.206123927664161e-06,
2910
+ "loss": 1.262,
2911
+ "step": 410
2912
+ },
2913
+ {
2914
+ "epoch": 0.9383561643835616,
2915
+ "grad_norm": 64508.68359375,
2916
+ "learning_rate": 2.0518891204722168e-06,
2917
+ "loss": 1.1443,
2918
+ "step": 411
2919
+ },
2920
+ {
2921
+ "epoch": 0.9406392694063926,
2922
+ "grad_norm": 68100.71875,
2923
+ "learning_rate": 1.903187065253076e-06,
2924
+ "loss": 1.156,
2925
+ "step": 412
2926
+ },
2927
+ {
2928
+ "epoch": 0.9429223744292238,
2929
+ "grad_norm": 46483.26171875,
2930
+ "learning_rate": 1.7600261616745106e-06,
2931
+ "loss": 0.9084,
2932
+ "step": 413
2933
+ },
2934
+ {
2935
+ "epoch": 0.9452054794520548,
2936
+ "grad_norm": 75558.0078125,
2937
+ "learning_rate": 1.6224144964036681e-06,
2938
+ "loss": 1.3323,
2939
+ "step": 414
2940
+ },
2941
+ {
2942
+ "epoch": 0.9474885844748858,
2943
+ "grad_norm": 61868.96484375,
2944
+ "learning_rate": 1.4903598426503241e-06,
2945
+ "loss": 1.6048,
2946
+ "step": 415
2947
+ },
2948
+ {
2949
+ "epoch": 0.9497716894977168,
2950
+ "grad_norm": 48915.76953125,
2951
+ "learning_rate": 1.3638696597277679e-06,
2952
+ "loss": 0.9012,
2953
+ "step": 416
2954
+ },
2955
+ {
2956
+ "epoch": 0.952054794520548,
2957
+ "grad_norm": 75366.640625,
2958
+ "learning_rate": 1.2429510926314836e-06,
2959
+ "loss": 1.132,
2960
+ "step": 417
2961
+ },
2962
+ {
2963
+ "epoch": 0.954337899543379,
2964
+ "grad_norm": 60648.18359375,
2965
+ "learning_rate": 1.1276109716355287e-06,
2966
+ "loss": 1.0748,
2967
+ "step": 418
2968
+ },
2969
+ {
2970
+ "epoch": 0.95662100456621,
2971
+ "grad_norm": 55365.9765625,
2972
+ "learning_rate": 1.0178558119067315e-06,
2973
+ "loss": 0.827,
2974
+ "step": 419
2975
+ },
2976
+ {
2977
+ "epoch": 0.958904109589041,
2978
+ "grad_norm": 58780.57421875,
2979
+ "learning_rate": 9.136918131366412e-07,
2980
+ "loss": 1.2993,
2981
+ "step": 420
2982
+ },
2983
+ {
2984
+ "epoch": 0.9611872146118722,
2985
+ "grad_norm": 47850.671875,
2986
+ "learning_rate": 8.151248591913518e-07,
2987
+ "loss": 1.0673,
2988
+ "step": 421
2989
+ },
2990
+ {
2991
+ "epoch": 0.9634703196347032,
2992
+ "grad_norm": 50755.59765625,
2993
+ "learning_rate": 7.221605177791691e-07,
2994
+ "loss": 1.0819,
2995
+ "step": 422
2996
+ },
2997
+ {
2998
+ "epoch": 0.9657534246575342,
2999
+ "grad_norm": 68484.7265625,
3000
+ "learning_rate": 6.348040401360833e-07,
3001
+ "loss": 1.3769,
3002
+ "step": 423
3003
+ },
3004
+ {
3005
+ "epoch": 0.9680365296803652,
3006
+ "grad_norm": 59223.92578125,
3007
+ "learning_rate": 5.530603607290851e-07,
3008
+ "loss": 1.3159,
3009
+ "step": 424
3010
+ },
3011
+ {
3012
+ "epoch": 0.9703196347031964,
3013
+ "grad_norm": 63650.15234375,
3014
+ "learning_rate": 4.76934096977566e-07,
3015
+ "loss": 1.3134,
3016
+ "step": 425
3017
+ },
3018
+ {
3019
+ "epoch": 0.9726027397260274,
3020
+ "grad_norm": 62978.29296875,
3021
+ "learning_rate": 4.0642954899238197e-07,
3022
+ "loss": 0.9593,
3023
+ "step": 426
3024
+ },
3025
+ {
3026
+ "epoch": 0.9748858447488584,
3027
+ "grad_norm": 63336.55078125,
3028
+ "learning_rate": 3.415506993330153e-07,
3029
+ "loss": 1.1585,
3030
+ "step": 427
3031
+ },
3032
+ {
3033
+ "epoch": 0.9771689497716894,
3034
+ "grad_norm": 58021.15625,
3035
+ "learning_rate": 2.8230121278257637e-07,
3036
+ "loss": 1.2236,
3037
+ "step": 428
3038
+ },
3039
+ {
3040
+ "epoch": 0.9794520547945206,
3041
+ "grad_norm": 56498.8515625,
3042
+ "learning_rate": 2.2868443614082469e-07,
3043
+ "loss": 1.0606,
3044
+ "step": 429
3045
+ },
3046
+ {
3047
+ "epoch": 0.9817351598173516,
3048
+ "grad_norm": 48720.328125,
3049
+ "learning_rate": 1.8070339803509807e-07,
3050
+ "loss": 1.123,
3051
+ "step": 430
3052
+ },
3053
+ {
3054
+ "epoch": 0.9840182648401826,
3055
+ "grad_norm": 60693.24609375,
3056
+ "learning_rate": 1.3836080874926049e-07,
3057
+ "loss": 0.5269,
3058
+ "step": 431
3059
+ },
3060
+ {
3061
+ "epoch": 0.9863013698630136,
3062
+ "grad_norm": 123484.75,
3063
+ "learning_rate": 1.0165906007056914e-07,
3064
+ "loss": 1.2241,
3065
+ "step": 432
3066
+ },
3067
+ {
3068
+ "epoch": 0.9885844748858448,
3069
+ "grad_norm": 52265.00390625,
3070
+ "learning_rate": 7.060022515460451e-08,
3071
+ "loss": 0.8173,
3072
+ "step": 433
3073
+ },
3074
+ {
3075
+ "epoch": 0.9908675799086758,
3076
+ "grad_norm": 54763.09375,
3077
+ "learning_rate": 4.518605840815315e-08,
3078
+ "loss": 1.2235,
3079
+ "step": 434
3080
+ },
3081
+ {
3082
+ "epoch": 0.9931506849315068,
3083
+ "grad_norm": 67981.8515625,
3084
+ "learning_rate": 2.5417995390086824e-08,
3085
+ "loss": 1.3369,
3086
+ "step": 435
3087
+ },
3088
+ {
3089
+ "epoch": 0.9954337899543378,
3090
+ "grad_norm": 53286.59375,
3091
+ "learning_rate": 1.129715273033849e-08,
3092
+ "loss": 0.9584,
3093
+ "step": 436
3094
+ },
3095
+ {
3096
+ "epoch": 0.997716894977169,
3097
+ "grad_norm": 54882.32421875,
3098
+ "learning_rate": 2.824328066730608e-09,
3099
+ "loss": 1.0711,
3100
+ "step": 437
3101
+ },
3102
+ {
3103
+ "epoch": 1.0,
3104
+ "grad_norm": 54870.58984375,
3105
+ "learning_rate": 0.0,
3106
+ "loss": 1.1062,
3107
+ "step": 438
3108
+ }
3109
+ ],
3110
+ "logging_steps": 1,
3111
+ "max_steps": 438,
3112
+ "num_input_tokens_seen": 0,
3113
+ "num_train_epochs": 1,
3114
+ "save_steps": 500,
3115
+ "stateful_callbacks": {
3116
+ "TrainerControl": {
3117
+ "args": {
3118
+ "should_epoch_stop": false,
3119
+ "should_evaluate": false,
3120
+ "should_log": false,
3121
+ "should_save": true,
3122
+ "should_training_stop": true
3123
+ },
3124
+ "attributes": {}
3125
+ }
3126
+ },
3127
+ "total_flos": 7972410614906880.0,
3128
+ "train_batch_size": 2,
3129
+ "trial_name": null,
3130
+ "trial_params": null
3131
+ }
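
The log entries above follow the standard Hugging Face Trainer state layout: per-step records with `step`, `loss`, `grad_norm`, and `learning_rate`, plus periodic records carrying `eval_loss`. As a minimal sketch (assuming the file has been downloaded locally and that the list shown above is the usual `log_history` array), the loss curve can be pulled out like this:

```python
import json

# Assumed local path to the file shown above; adjust to wherever the
# checkpoint directory was downloaded.
STATE_PATH = "checkpoint-438/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training records carry "loss"; evaluation records carry "eval_loss".
train_points = [(r["step"], r["loss"]) for r in state["log_history"] if "loss" in r]
eval_points = [(r["step"], r["eval_loss"]) for r in state["log_history"] if "eval_loss" in r]

print(f"{len(train_points)} training steps logged, final loss {train_points[-1][1]:.4f}")
print("eval_loss by step:", eval_points)
```
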
checkpoint-438/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e61646f11278fd7349a14f4ee14f752c2e534aee882e099faef4438d9aff025
3
+ size 6136
config.json ADDED
@@ -0,0 +1,45 @@
1
+ {
2
+ "_name_or_path": "openlm-research/open_llama_3b_v2",
3
+ "architectures": [
4
+ "LlamaForCausalLM"
5
+ ],
6
+ "attention_bias": false,
7
+ "attention_dropout": 0.0,
8
+ "bos_token_id": 1,
9
+ "eos_token_id": 2,
10
+ "hidden_act": "silu",
11
+ "hidden_size": 3200,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 8640,
14
+ "max_position_embeddings": 2048,
15
+ "mlp_bias": false,
16
+ "model_type": "llama",
17
+ "num_attention_heads": 32,
18
+ "num_hidden_layers": 26,
19
+ "num_key_value_heads": 32,
20
+ "pad_token_id": 0,
21
+ "pretraining_tp": 1,
22
+ "quantization_config": {
23
+ "_load_in_4bit": true,
24
+ "_load_in_8bit": false,
25
+ "bnb_4bit_compute_dtype": "float16",
26
+ "bnb_4bit_quant_storage": "bfloat16",
27
+ "bnb_4bit_quant_type": "nf4",
28
+ "bnb_4bit_use_double_quant": true,
29
+ "llm_int8_enable_fp32_cpu_offload": false,
30
+ "llm_int8_has_fp16_weight": false,
31
+ "llm_int8_skip_modules": null,
32
+ "llm_int8_threshold": 6.0,
33
+ "load_in_4bit": true,
34
+ "load_in_8bit": false,
35
+ "quant_method": "bitsandbytes"
36
+ },
37
+ "rms_norm_eps": 1e-06,
38
+ "rope_scaling": null,
39
+ "rope_theta": 10000.0,
40
+ "tie_word_embeddings": false,
41
+ "torch_dtype": "float16",
42
+ "transformers_version": "4.44.2",
43
+ "use_cache": false,
44
+ "vocab_size": 32000
45
+ }
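
The embedded `quantization_config` above mirrors the bitsandbytes settings used for QLoRA training (4-bit NF4 with double quantization and float16 compute). A minimal sketch, assuming one wants to reload the base model the same way with `transformers` — loading the LoRA adapter on top (e.g. via `peft`) would be a separate step:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the quantization_config block in config.json above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)

# "openlm-research/open_llama_3b_v2" is the base model named in config.json.
model = AutoModelForCausalLM.from_pretrained(
    "openlm-research/open_llama_3b_v2",
    quantization_config=bnb_config,
    device_map="auto",
)
```
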
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "</s>",
17
+ "unk_token": {
18
+ "content": "<unk>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91b289e85fa20fd375d8b33dc12f77616f18abc6359804471d1fafcb425fecb8
3
+ size 511574
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": true,
5
+ "added_tokens_decoder": {
6
+ "0": {
7
+ "content": "<unk>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false,
12
+ "special": true
13
+ },
14
+ "1": {
15
+ "content": "<s>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false,
20
+ "special": true
21
+ },
22
+ "2": {
23
+ "content": "</s>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": false,
27
+ "single_word": false,
28
+ "special": true
29
+ }
30
+ },
31
+ "bos_token": "<s>",
32
+ "clean_up_tokenization_spaces": false,
33
+ "eos_token": "</s>",
34
+ "legacy": true,
35
+ "model_max_length": 2048,
36
+ "pad_token": "</s>",
37
+ "sp_model_kwargs": {},
38
+ "spaces_between_special_tokens": false,
39
+ "tokenizer_class": "LlamaTokenizer",
40
+ "unk_token": "<unk>",
41
+ "use_default_system_prompt": false,
42
+ "use_fast": true
43
+ }