nbsts committed
Commit 00160cb · 1 Parent(s): 6cc7253

Upload 14 files

README.md ADDED
@@ -0,0 +1,69 @@
+ ---
+ license: other
+ library_name: peft
+ tags:
+ - llama-factory
+ - lora
+ - generated_from_trainer
+ base_model: meta-llama/Llama-2-13b-hf
+ model-index:
+ - name: train_2024-01-03-02-43-22
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # train_2024-01-03-02-43-22
+
+ This model is a fine-tuned version of [meta-llama/Llama-2-13b-hf](https://huggingface.co/meta-llama/Llama-2-13b-hf) on the anli_train_r1_contradiction dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.0611
+
+ ## Model description
+
+ A LoRA adapter (PEFT) for meta-llama/Llama-2-13b-hf, trained with LLaMA-Factory on the anli_train_r1_contradiction dataset. Per adapter_config.json, the adapter targets the q_proj and v_proj attention projections with rank 8, alpha 16 and dropout 0.1.
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ Trained and evaluated on the anli_train_r1_contradiction dataset; see the results table below and trainer_log.jsonl / trainer_state.json in this repository for the full loss curves.
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 4
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 3.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 1.0228        | 0.44  | 100  | 1.0888          |
+ | 1.0068        | 0.88  | 200  | 1.0738          |
+ | 1.0218        | 1.33  | 300  | 1.0680          |
+ | 1.084         | 1.77  | 400  | 1.0611          |
+ | 0.897         | 2.21  | 500  | 1.0757          |
+ | 0.9597        | 2.65  | 600  | 1.0745          |
+
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.36.2
+ - Pytorch 2.1.0+cu121
+ - Datasets 2.16.1
+ - Tokenizers 0.15.0
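
The adapter above can be loaded on top of the base model with the PEFT/Transformers versions listed in the card. A minimal sketch, assuming the adapter files live in a local directory; the path, dtype, device placement, and prompt below are illustrative and not part of the commit:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Llama-2-13b-hf"   # base model named in the card
adapter_path = "path/to/this/adapter"   # placeholder: directory containing the files in this commit

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_path)  # attach the LoRA weights
model.eval()

prompt = "Example prompt"  # illustrative only
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```

If a standalone checkpoint is wanted, `model.merge_and_unload()` can fold the LoRA weights back into the base model after loading.
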
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
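
For readers reproducing the setup, the JSON above corresponds roughly to the following peft `LoraConfig`; this is a sketch reconstructed from the config values, not code taken from the commit:

```python
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,          # "task_type": "CAUSAL_LM"
    r=8,                                   # LoRA rank
    lora_alpha=16,                         # effective scaling = lora_alpha / r = 2.0
    lora_dropout=0.1,
    bias="none",
    target_modules=["q_proj", "v_proj"],   # adapters only on the attention q/v projections
)
```

Fields such as inference_mode, loftq_config and the megatron_* entries appear to be defaults written out by PEFT 0.7.1 when the adapter is saved.
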
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0b4a88864c3ffdd465afcd0fa14bfae2014a6052f2bfa8bcaca0dab78b73038
+ size 26235704
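
adapter_model.safetensors is stored via Git LFS, so the diff above shows only the pointer (spec version, sha256 oid, byte size). A downloaded copy of the real weights can be checked against the pointer; a small sketch, assuming the file sits in the current directory:

```python
import hashlib
import os

path = "adapter_model.safetensors"  # assumed local copy of the real file, not the pointer
expected_oid = "a0b4a88864c3ffdd465afcd0fa14bfae2014a6052f2bfa8bcaca0dab78b73038"
expected_size = 26235704

assert os.path.getsize(path) == expected_size, "size does not match the LFS pointer"
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == expected_oid, "sha256 does not match the LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")
```
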
all_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "epoch": 3.0,
+   "eval_loss": 1.061107873916626,
+   "eval_runtime": 75.5544,
+   "eval_samples_per_second": 11.978,
+   "eval_steps_per_second": 1.509,
+   "train_loss": 0.9757168982232918,
+   "train_runtime": 3618.1154,
+   "train_samples_per_second": 3.0,
+   "train_steps_per_second": 0.187
+ }
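
Two quick sanity checks on these numbers (a sketch: perplexity here is simply exp of the cross-entropy eval loss, and the effective batch size follows from the hyperparameters in the README):

```python
import math

eval_loss = 1.061107873916626   # from all_results.json
print(f"eval perplexity ≈ {math.exp(eval_loss):.2f}")   # ≈ 2.89

train_batch_size = 4            # per-device batch size from the card
gradient_accumulation_steps = 4
print(train_batch_size * gradient_accumulation_steps)   # 16 == total_train_batch_size
```
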
eval_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 3.0,
+   "eval_loss": 1.061107873916626,
+   "eval_runtime": 75.5544,
+   "eval_samples_per_second": 11.978,
+   "eval_steps_per_second": 1.509
+ }
runs/Jan03_02-49-47_e0b4d469ca82/events.out.tfevents.1704250549.e0b4d469ca82.6302.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d8c4a061277a2ed1ffb875cb0d3aa2ad94293cad56e66e1c1895e4c9440b13d
+ size 28118
runs/Jan03_02-49-47_e0b4d469ca82/events.out.tfevents.1704254243.e0b4d469ca82.6302.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81d5427d69571f52d9cdfa6b8517f0ef08f432552f02dfebcce14572caa4a22c
+ size 359
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "split_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 3.0,
+   "train_loss": 0.9757168982232918,
+   "train_runtime": 3618.1154,
+   "train_samples_per_second": 3.0,
+   "train_steps_per_second": 0.187
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,143 @@
1
+ {"current_steps": 5, "total_steps": 678, "loss": 1.4828, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019997316318671806, "epoch": 0.02, "percentage": 0.74, "elapsed_time": "0:00:25", "remaining_time": "0:56:40"}
2
+ {"current_steps": 10, "total_steps": 678, "loss": 1.1204, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019989266715116316, "epoch": 0.04, "percentage": 1.47, "elapsed_time": "0:00:48", "remaining_time": "0:54:07"}
3
+ {"current_steps": 15, "total_steps": 678, "loss": 1.1449, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019975855509847686, "epoch": 0.07, "percentage": 2.21, "elapsed_time": "0:01:12", "remaining_time": "0:53:13"}
4
+ {"current_steps": 20, "total_steps": 678, "loss": 1.1126, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019957089901146148, "epoch": 0.09, "percentage": 2.95, "elapsed_time": "0:01:35", "remaining_time": "0:52:15"}
5
+ {"current_steps": 25, "total_steps": 678, "loss": 1.1048, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019932979961194435, "epoch": 0.11, "percentage": 3.69, "elapsed_time": "0:01:59", "remaining_time": "0:51:50"}
6
+ {"current_steps": 30, "total_steps": 678, "loss": 1.2161, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.0001990353863067169, "epoch": 0.13, "percentage": 4.42, "elapsed_time": "0:02:22", "remaining_time": "0:51:11"}
7
+ {"current_steps": 35, "total_steps": 678, "loss": 1.0051, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019868781711807705, "epoch": 0.15, "percentage": 5.16, "elapsed_time": "0:02:45", "remaining_time": "0:50:37"}
8
+ {"current_steps": 40, "total_steps": 678, "loss": 1.0314, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019828727859901317, "epoch": 0.18, "percentage": 5.9, "elapsed_time": "0:03:08", "remaining_time": "0:50:12"}
9
+ {"current_steps": 45, "total_steps": 678, "loss": 1.0718, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019783398573307428, "epoch": 0.2, "percentage": 6.64, "elapsed_time": "0:03:32", "remaining_time": "0:49:46"}
10
+ {"current_steps": 50, "total_steps": 678, "loss": 1.1571, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019732818181898045, "epoch": 0.22, "percentage": 7.37, "elapsed_time": "0:03:55", "remaining_time": "0:49:14"}
11
+ {"current_steps": 55, "total_steps": 678, "loss": 1.0602, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.0001967701383400357, "epoch": 0.24, "percentage": 8.11, "elapsed_time": "0:04:18", "remaining_time": "0:48:45"}
12
+ {"current_steps": 60, "total_steps": 678, "loss": 1.1215, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.0001961601548184129, "epoch": 0.27, "percentage": 8.85, "elapsed_time": "0:04:41", "remaining_time": "0:48:20"}
13
+ {"current_steps": 65, "total_steps": 678, "loss": 1.1434, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019549855865438965, "epoch": 0.29, "percentage": 9.59, "elapsed_time": "0:05:05", "remaining_time": "0:47:56"}
14
+ {"current_steps": 70, "total_steps": 678, "loss": 1.0844, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019478570495062037, "epoch": 0.31, "percentage": 10.32, "elapsed_time": "0:05:28", "remaining_time": "0:47:34"}
15
+ {"current_steps": 75, "total_steps": 678, "loss": 1.1123, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019402197632153992, "epoch": 0.33, "percentage": 11.06, "elapsed_time": "0:05:51", "remaining_time": "0:47:07"}
16
+ {"current_steps": 80, "total_steps": 678, "loss": 1.1054, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019320778268800066, "epoch": 0.35, "percentage": 11.8, "elapsed_time": "0:06:14", "remaining_time": "0:46:42"}
17
+ {"current_steps": 85, "total_steps": 678, "loss": 1.0368, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019234356105725297, "epoch": 0.38, "percentage": 12.54, "elapsed_time": "0:06:38", "remaining_time": "0:46:19"}
18
+ {"current_steps": 90, "total_steps": 678, "loss": 1.1195, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019142977528838762, "epoch": 0.4, "percentage": 13.27, "elapsed_time": "0:07:01", "remaining_time": "0:45:53"}
19
+ {"current_steps": 95, "total_steps": 678, "loss": 1.0778, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00019046691584336577, "epoch": 0.42, "percentage": 14.01, "elapsed_time": "0:07:24", "remaining_time": "0:45:27"}
20
+ {"current_steps": 100, "total_steps": 678, "loss": 1.0228, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.0001894554995237703, "epoch": 0.44, "percentage": 14.75, "elapsed_time": "0:07:47", "remaining_time": "0:45:01"}
21
+ {"current_steps": 100, "total_steps": 678, "loss": null, "eval_loss": 1.0887844562530518, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.44, "percentage": 14.75, "elapsed_time": "0:07:47", "remaining_time": "0:45:01"}
22
+ {"current_steps": 105, "total_steps": 678, "loss": 1.1156, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.0001883960691934196, "epoch": 0.46, "percentage": 15.49, "elapsed_time": "0:09:26", "remaining_time": "0:51:31"}
23
+ {"current_steps": 110, "total_steps": 678, "loss": 1.0259, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00018728919348699283, "epoch": 0.49, "percentage": 16.22, "elapsed_time": "0:09:49", "remaining_time": "0:50:44"}
24
+ {"current_steps": 115, "total_steps": 678, "loss": 1.0112, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00018613546650482322, "epoch": 0.51, "percentage": 16.96, "elapsed_time": "0:10:12", "remaining_time": "0:49:59"}
25
+ {"current_steps": 120, "total_steps": 678, "loss": 1.0279, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00018493550749402278, "epoch": 0.53, "percentage": 17.7, "elapsed_time": "0:10:35", "remaining_time": "0:49:15"}
26
+ {"current_steps": 125, "total_steps": 678, "loss": 1.105, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00018368996051610986, "epoch": 0.55, "percentage": 18.44, "elapsed_time": "0:10:58", "remaining_time": "0:48:33"}
27
+ {"current_steps": 130, "total_steps": 678, "loss": 1.041, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00018239949410131802, "epoch": 0.57, "percentage": 19.17, "elapsed_time": "0:11:22", "remaining_time": "0:47:57"}
28
+ {"current_steps": 135, "total_steps": 678, "loss": 1.0475, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00018106480088977172, "epoch": 0.6, "percentage": 19.91, "elapsed_time": "0:11:45", "remaining_time": "0:47:17"}
29
+ {"current_steps": 140, "total_steps": 678, "loss": 1.0241, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00017968659725972112, "epoch": 0.62, "percentage": 20.65, "elapsed_time": "0:12:08", "remaining_time": "0:46:40"}
30
+ {"current_steps": 145, "total_steps": 678, "loss": 1.0071, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00017826562294303585, "epoch": 0.64, "percentage": 21.39, "elapsed_time": "0:12:32", "remaining_time": "0:46:06"}
31
+ {"current_steps": 150, "total_steps": 678, "loss": 0.9714, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.0001768026406281642, "epoch": 0.66, "percentage": 22.12, "elapsed_time": "0:12:55", "remaining_time": "0:45:31"}
32
+ {"current_steps": 155, "total_steps": 678, "loss": 1.0376, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00017529843555077066, "epoch": 0.69, "percentage": 22.86, "elapsed_time": "0:13:19", "remaining_time": "0:44:58"}
33
+ {"current_steps": 160, "total_steps": 678, "loss": 0.9806, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00017375381507227108, "epoch": 0.71, "percentage": 23.6, "elapsed_time": "0:13:43", "remaining_time": "0:44:26"}
34
+ {"current_steps": 165, "total_steps": 678, "loss": 1.1022, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00017216960824649303, "epoch": 0.73, "percentage": 24.34, "elapsed_time": "0:14:07", "remaining_time": "0:43:53"}
35
+ {"current_steps": 170, "total_steps": 678, "loss": 0.9723, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00017054666537469213, "epoch": 0.75, "percentage": 25.07, "elapsed_time": "0:14:31", "remaining_time": "0:43:22"}
36
+ {"current_steps": 175, "total_steps": 678, "loss": 1.1081, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00016888585754916476, "epoch": 0.77, "percentage": 25.81, "elapsed_time": "0:14:54", "remaining_time": "0:42:51"}
37
+ {"current_steps": 180, "total_steps": 678, "loss": 1.0525, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00016718807618570106, "epoch": 0.8, "percentage": 26.55, "elapsed_time": "0:15:17", "remaining_time": "0:42:18"}
38
+ {"current_steps": 185, "total_steps": 678, "loss": 1.0761, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00016545423254513004, "epoch": 0.82, "percentage": 27.29, "elapsed_time": "0:15:41", "remaining_time": "0:41:47"}
39
+ {"current_steps": 190, "total_steps": 678, "loss": 1.1493, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00016368525724421248, "epoch": 0.84, "percentage": 28.02, "elapsed_time": "0:16:03", "remaining_time": "0:41:15"}
40
+ {"current_steps": 195, "total_steps": 678, "loss": 0.9942, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00016188209975614542, "epoch": 0.86, "percentage": 28.76, "elapsed_time": "0:16:27", "remaining_time": "0:40:45"}
41
+ {"current_steps": 200, "total_steps": 678, "loss": 1.0068, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00016004572790094535, "epoch": 0.88, "percentage": 29.5, "elapsed_time": "0:16:50", "remaining_time": "0:40:14"}
42
+ {"current_steps": 200, "total_steps": 678, "loss": null, "eval_loss": 1.0737706422805786, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.88, "percentage": 29.5, "elapsed_time": "0:16:50", "remaining_time": "0:40:14"}
43
+ {"current_steps": 205, "total_steps": 678, "loss": 1.0579, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00015817712732598413, "epoch": 0.91, "percentage": 30.24, "elapsed_time": "0:18:28", "remaining_time": "0:42:38"}
44
+ {"current_steps": 210, "total_steps": 678, "loss": 1.0985, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00015627730097695638, "epoch": 0.93, "percentage": 30.97, "elapsed_time": "0:18:52", "remaining_time": "0:42:02"}
45
+ {"current_steps": 215, "total_steps": 678, "loss": 1.071, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00015434726855956206, "epoch": 0.95, "percentage": 31.71, "elapsed_time": "0:19:15", "remaining_time": "0:41:28"}
46
+ {"current_steps": 220, "total_steps": 678, "loss": 1.0617, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00015238806599219336, "epoch": 0.97, "percentage": 32.45, "elapsed_time": "0:19:38", "remaining_time": "0:40:53"}
47
+ {"current_steps": 225, "total_steps": 678, "loss": 0.9789, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00015040074484992, "epoch": 0.99, "percentage": 33.19, "elapsed_time": "0:20:01", "remaining_time": "0:40:18"}
48
+ {"current_steps": 230, "total_steps": 678, "loss": 1.072, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00014838637180007047, "epoch": 1.02, "percentage": 33.92, "elapsed_time": "0:20:24", "remaining_time": "0:39:44"}
49
+ {"current_steps": 235, "total_steps": 678, "loss": 1.0098, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00014634602802971312, "epoch": 1.04, "percentage": 34.66, "elapsed_time": "0:20:47", "remaining_time": "0:39:12"}
50
+ {"current_steps": 240, "total_steps": 678, "loss": 1.0222, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00014428080866534396, "epoch": 1.06, "percentage": 35.4, "elapsed_time": "0:21:11", "remaining_time": "0:38:40"}
51
+ {"current_steps": 245, "total_steps": 678, "loss": 0.9734, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.0001421918221850923, "epoch": 1.08, "percentage": 36.14, "elapsed_time": "0:21:34", "remaining_time": "0:38:08"}
52
+ {"current_steps": 250, "total_steps": 678, "loss": 0.9364, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00014008018982376044, "epoch": 1.1, "percentage": 36.87, "elapsed_time": "0:21:57", "remaining_time": "0:37:36"}
53
+ {"current_steps": 255, "total_steps": 678, "loss": 0.9735, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00013794704497101655, "epoch": 1.13, "percentage": 37.61, "elapsed_time": "0:22:21", "remaining_time": "0:37:04"}
54
+ {"current_steps": 260, "total_steps": 678, "loss": 1.0933, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00013579353256306287, "epoch": 1.15, "percentage": 38.35, "elapsed_time": "0:22:44", "remaining_time": "0:36:33"}
55
+ {"current_steps": 265, "total_steps": 678, "loss": 0.9764, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00013362080846810725, "epoch": 1.17, "percentage": 39.09, "elapsed_time": "0:23:07", "remaining_time": "0:36:03"}
56
+ {"current_steps": 270, "total_steps": 678, "loss": 1.0584, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00013143003886596669, "epoch": 1.19, "percentage": 39.82, "elapsed_time": "0:23:32", "remaining_time": "0:35:34"}
57
+ {"current_steps": 275, "total_steps": 678, "loss": 0.9116, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00012922239962213637, "epoch": 1.22, "percentage": 40.56, "elapsed_time": "0:23:55", "remaining_time": "0:35:04"}
58
+ {"current_steps": 280, "total_steps": 678, "loss": 1.0587, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00012699907565665982, "epoch": 1.24, "percentage": 41.3, "elapsed_time": "0:24:18", "remaining_time": "0:34:33"}
59
+ {"current_steps": 285, "total_steps": 678, "loss": 0.9342, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00012476126030813963, "epoch": 1.26, "percentage": 42.04, "elapsed_time": "0:24:41", "remaining_time": "0:34:02"}
60
+ {"current_steps": 290, "total_steps": 678, "loss": 0.9826, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00012251015469322916, "epoch": 1.28, "percentage": 42.77, "elapsed_time": "0:25:05", "remaining_time": "0:33:33"}
61
+ {"current_steps": 295, "total_steps": 678, "loss": 1.0656, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00012024696706194967, "epoch": 1.3, "percentage": 43.51, "elapsed_time": "0:25:28", "remaining_time": "0:33:04"}
62
+ {"current_steps": 300, "total_steps": 678, "loss": 1.0218, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00011797291214917881, "epoch": 1.33, "percentage": 44.25, "elapsed_time": "0:25:51", "remaining_time": "0:32:35"}
63
+ {"current_steps": 300, "total_steps": 678, "loss": null, "eval_loss": 1.06796395778656, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.33, "percentage": 44.25, "elapsed_time": "0:25:51", "remaining_time": "0:32:35"}
64
+ {"current_steps": 305, "total_steps": 678, "loss": 0.9915, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00011568921052265836, "epoch": 1.35, "percentage": 44.99, "elapsed_time": "0:27:30", "remaining_time": "0:33:38"}
65
+ {"current_steps": 310, "total_steps": 678, "loss": 0.9592, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00011339708792787119, "epoch": 1.37, "percentage": 45.72, "elapsed_time": "0:27:54", "remaining_time": "0:33:07"}
66
+ {"current_steps": 315, "total_steps": 678, "loss": 0.9875, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00011109777463013915, "epoch": 1.39, "percentage": 46.46, "elapsed_time": "0:28:17", "remaining_time": "0:32:36"}
67
+ {"current_steps": 320, "total_steps": 678, "loss": 1.1583, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00010879250475429523, "epoch": 1.41, "percentage": 47.2, "elapsed_time": "0:28:40", "remaining_time": "0:32:04"}
68
+ {"current_steps": 325, "total_steps": 678, "loss": 0.977, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00010648251562228386, "epoch": 1.44, "percentage": 47.94, "elapsed_time": "0:29:03", "remaining_time": "0:31:34"}
69
+ {"current_steps": 330, "total_steps": 678, "loss": 1.041, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00010416904708904548, "epoch": 1.46, "percentage": 48.67, "elapsed_time": "0:29:27", "remaining_time": "0:31:03"}
70
+ {"current_steps": 335, "total_steps": 678, "loss": 0.972, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 0.00010185334087704124, "epoch": 1.48, "percentage": 49.41, "elapsed_time": "0:29:50", "remaining_time": "0:30:33"}
71
+ {"current_steps": 340, "total_steps": 678, "loss": 1.0115, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.953663990977568e-05, "epoch": 1.5, "percentage": 50.15, "elapsed_time": "0:30:14", "remaining_time": "0:30:03"}
72
+ {"current_steps": 345, "total_steps": 678, "loss": 1.0078, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.722018764467461e-05, "epoch": 1.52, "percentage": 50.88, "elapsed_time": "0:30:38", "remaining_time": "0:29:34"}
73
+ {"current_steps": 350, "total_steps": 678, "loss": 0.955, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.490522740567633e-05, "epoch": 1.55, "percentage": 51.62, "elapsed_time": "0:31:00", "remaining_time": "0:29:03"}
74
+ {"current_steps": 355, "total_steps": 678, "loss": 0.9719, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.259300171589456e-05, "epoch": 1.57, "percentage": 52.36, "elapsed_time": "0:31:23", "remaining_time": "0:28:34"}
75
+ {"current_steps": 360, "total_steps": 678, "loss": 0.871, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.028475163071141e-05, "epoch": 1.59, "percentage": 53.1, "elapsed_time": "0:31:47", "remaining_time": "0:28:04"}
76
+ {"current_steps": 365, "total_steps": 678, "loss": 0.9568, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.798171607165778e-05, "epoch": 1.61, "percentage": 53.83, "elapsed_time": "0:32:10", "remaining_time": "0:27:35"}
77
+ {"current_steps": 370, "total_steps": 678, "loss": 0.9466, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.568513116143919e-05, "epoch": 1.64, "percentage": 54.57, "elapsed_time": "0:32:34", "remaining_time": "0:27:06"}
78
+ {"current_steps": 375, "total_steps": 678, "loss": 0.8967, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.339622956046417e-05, "epoch": 1.66, "percentage": 55.31, "elapsed_time": "0:32:57", "remaining_time": "0:26:38"}
79
+ {"current_steps": 380, "total_steps": 678, "loss": 0.9465, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.111623980523035e-05, "epoch": 1.68, "percentage": 56.05, "elapsed_time": "0:33:21", "remaining_time": "0:26:09"}
80
+ {"current_steps": 385, "total_steps": 678, "loss": 0.9499, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.884638564892472e-05, "epoch": 1.7, "percentage": 56.78, "elapsed_time": "0:33:44", "remaining_time": "0:25:40"}
81
+ {"current_steps": 390, "total_steps": 678, "loss": 0.8955, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.658788540459062e-05, "epoch": 1.72, "percentage": 57.52, "elapsed_time": "0:34:07", "remaining_time": "0:25:12"}
82
+ {"current_steps": 395, "total_steps": 678, "loss": 0.8982, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.434195129121518e-05, "epoch": 1.75, "percentage": 58.26, "elapsed_time": "0:34:31", "remaining_time": "0:24:44"}
83
+ {"current_steps": 400, "total_steps": 678, "loss": 1.084, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.210978878308729e-05, "epoch": 1.77, "percentage": 59.0, "elapsed_time": "0:34:54", "remaining_time": "0:24:16"}
84
+ {"current_steps": 400, "total_steps": 678, "loss": null, "eval_loss": 1.061107873916626, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.77, "percentage": 59.0, "elapsed_time": "0:34:54", "remaining_time": "0:24:16"}
85
+ {"current_steps": 405, "total_steps": 678, "loss": 0.8936, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.989259596277582e-05, "epoch": 1.79, "percentage": 59.73, "elapsed_time": "0:36:34", "remaining_time": "0:24:38"}
86
+ {"current_steps": 410, "total_steps": 678, "loss": 0.8794, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.76915628780754e-05, "epoch": 1.81, "percentage": 60.47, "elapsed_time": "0:36:57", "remaining_time": "0:24:09"}
87
+ {"current_steps": 415, "total_steps": 678, "loss": 0.9631, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.55078709032644e-05, "epoch": 1.83, "percentage": 61.21, "elapsed_time": "0:37:20", "remaining_time": "0:23:39"}
88
+ {"current_steps": 420, "total_steps": 678, "loss": 0.9636, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.334269210501875e-05, "epoch": 1.86, "percentage": 61.95, "elapsed_time": "0:37:43", "remaining_time": "0:23:10"}
89
+ {"current_steps": 425, "total_steps": 678, "loss": 0.9761, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.119718861332098e-05, "epoch": 1.88, "percentage": 62.68, "elapsed_time": "0:38:06", "remaining_time": "0:22:41"}
90
+ {"current_steps": 430, "total_steps": 678, "loss": 0.9732, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.9072511997703226e-05, "epoch": 1.9, "percentage": 63.42, "elapsed_time": "0:38:29", "remaining_time": "0:22:12"}
91
+ {"current_steps": 435, "total_steps": 678, "loss": 0.995, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.696980264915777e-05, "epoch": 1.92, "percentage": 64.16, "elapsed_time": "0:38:52", "remaining_time": "0:21:43"}
92
+ {"current_steps": 440, "total_steps": 678, "loss": 1.0037, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.489018916804813e-05, "epoch": 1.94, "percentage": 64.9, "elapsed_time": "0:39:16", "remaining_time": "0:21:14"}
93
+ {"current_steps": 445, "total_steps": 678, "loss": 0.8503, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.283478775834811e-05, "epoch": 1.97, "percentage": 65.63, "elapsed_time": "0:39:39", "remaining_time": "0:20:45"}
94
+ {"current_steps": 450, "total_steps": 678, "loss": 0.8914, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.080470162853472e-05, "epoch": 1.99, "percentage": 66.37, "elapsed_time": "0:40:02", "remaining_time": "0:20:17"}
95
+ {"current_steps": 455, "total_steps": 678, "loss": 0.9531, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.880102039945624e-05, "epoch": 2.01, "percentage": 67.11, "elapsed_time": "0:40:25", "remaining_time": "0:19:48"}
96
+ {"current_steps": 460, "total_steps": 678, "loss": 0.9294, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6824819519493057e-05, "epoch": 2.03, "percentage": 67.85, "elapsed_time": "0:40:48", "remaining_time": "0:19:20"}
97
+ {"current_steps": 465, "total_steps": 678, "loss": 0.8584, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.487715968732568e-05, "epoch": 2.06, "percentage": 68.58, "elapsed_time": "0:41:12", "remaining_time": "0:18:52"}
98
+ {"current_steps": 470, "total_steps": 678, "loss": 0.9285, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.29590862826191e-05, "epoch": 2.08, "percentage": 69.32, "elapsed_time": "0:41:35", "remaining_time": "0:18:24"}
99
+ {"current_steps": 475, "total_steps": 678, "loss": 0.8742, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.107162880492984e-05, "epoch": 2.1, "percentage": 70.06, "elapsed_time": "0:41:59", "remaining_time": "0:17:56"}
100
+ {"current_steps": 480, "total_steps": 678, "loss": 0.7903, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.921580032113602e-05, "epoch": 2.12, "percentage": 70.8, "elapsed_time": "0:42:23", "remaining_time": "0:17:29"}
101
+ {"current_steps": 485, "total_steps": 678, "loss": 0.8287, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.739259692168764e-05, "epoch": 2.14, "percentage": 71.53, "elapsed_time": "0:42:46", "remaining_time": "0:17:01"}
102
+ {"current_steps": 490, "total_steps": 678, "loss": 0.8967, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.560299718596889e-05, "epoch": 2.17, "percentage": 72.27, "elapsed_time": "0:43:09", "remaining_time": "0:16:33"}
103
+ {"current_steps": 495, "total_steps": 678, "loss": 0.841, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.3847961657058845e-05, "epoch": 2.19, "percentage": 73.01, "elapsed_time": "0:43:32", "remaining_time": "0:16:05"}
104
+ {"current_steps": 500, "total_steps": 678, "loss": 0.897, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.212843232617343e-05, "epoch": 2.21, "percentage": 73.75, "elapsed_time": "0:43:56", "remaining_time": "0:15:38"}
105
+ {"current_steps": 500, "total_steps": 678, "loss": null, "eval_loss": 1.0756696462631226, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 2.21, "percentage": 73.75, "elapsed_time": "0:43:56", "remaining_time": "0:15:38"}
106
+ {"current_steps": 505, "total_steps": 678, "loss": 0.9455, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0445332127064275e-05, "epoch": 2.23, "percentage": 74.48, "elapsed_time": "0:45:35", "remaining_time": "0:15:37"}
107
+ {"current_steps": 510, "total_steps": 678, "loss": 0.9292, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.879956444064703e-05, "epoch": 2.25, "percentage": 75.22, "elapsed_time": "0:45:58", "remaining_time": "0:15:08"}
108
+ {"current_steps": 515, "total_steps": 678, "loss": 0.9016, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7192012610123774e-05, "epoch": 2.28, "percentage": 75.96, "elapsed_time": "0:46:22", "remaining_time": "0:14:40"}
109
+ {"current_steps": 520, "total_steps": 678, "loss": 0.937, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5623539466860813e-05, "epoch": 2.3, "percentage": 76.7, "elapsed_time": "0:46:44", "remaining_time": "0:14:12"}
110
+ {"current_steps": 525, "total_steps": 678, "loss": 0.8348, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.409498686727587e-05, "epoch": 2.32, "percentage": 77.43, "elapsed_time": "0:47:07", "remaining_time": "0:13:44"}
111
+ {"current_steps": 530, "total_steps": 678, "loss": 0.8387, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2607175240983026e-05, "epoch": 2.34, "percentage": 78.17, "elapsed_time": "0:47:31", "remaining_time": "0:13:16"}
112
+ {"current_steps": 535, "total_steps": 678, "loss": 0.7736, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1160903150438605e-05, "epoch": 2.36, "percentage": 78.91, "elapsed_time": "0:47:55", "remaining_time": "0:12:48"}
113
+ {"current_steps": 540, "total_steps": 678, "loss": 0.8672, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9756946862323535e-05, "epoch": 2.39, "percentage": 79.65, "elapsed_time": "0:48:18", "remaining_time": "0:12:20"}
114
+ {"current_steps": 545, "total_steps": 678, "loss": 0.8688, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.839605993089307e-05, "epoch": 2.41, "percentage": 80.38, "elapsed_time": "0:48:42", "remaining_time": "0:11:53"}
115
+ {"current_steps": 550, "total_steps": 678, "loss": 0.8631, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.707897279351671e-05, "epoch": 2.43, "percentage": 81.12, "elapsed_time": "0:49:06", "remaining_time": "0:11:25"}
116
+ {"current_steps": 555, "total_steps": 678, "loss": 0.9171, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.580639237862608e-05, "epoch": 2.45, "percentage": 81.86, "elapsed_time": "0:49:28", "remaining_time": "0:10:57"}
117
+ {"current_steps": 560, "total_steps": 678, "loss": 0.9134, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4579001726280828e-05, "epoch": 2.48, "percentage": 82.6, "elapsed_time": "0:49:52", "remaining_time": "0:10:30"}
118
+ {"current_steps": 565, "total_steps": 678, "loss": 0.8363, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.339745962155613e-05, "epoch": 2.5, "percentage": 83.33, "elapsed_time": "0:50:16", "remaining_time": "0:10:03"}
119
+ {"current_steps": 570, "total_steps": 678, "loss": 0.8862, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2262400240949023e-05, "epoch": 2.52, "percentage": 84.07, "elapsed_time": "0:50:38", "remaining_time": "0:09:35"}
120
+ {"current_steps": 575, "total_steps": 678, "loss": 0.9154, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1174432811992685e-05, "epoch": 2.54, "percentage": 84.81, "elapsed_time": "0:51:02", "remaining_time": "0:09:08"}
121
+ {"current_steps": 580, "total_steps": 678, "loss": 0.9423, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.013414128626211e-05, "epoch": 2.56, "percentage": 85.55, "elapsed_time": "0:51:24", "remaining_time": "0:08:41"}
122
+ {"current_steps": 585, "total_steps": 678, "loss": 0.8462, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.142084025945984e-06, "epoch": 2.59, "percentage": 86.28, "elapsed_time": "0:51:48", "remaining_time": "0:08:14"}
123
+ {"current_steps": 590, "total_steps": 678, "loss": 0.8019, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.19879350415349e-06, "epoch": 2.61, "percentage": 87.02, "elapsed_time": "0:52:11", "remaining_time": "0:07:47"}
124
+ {"current_steps": 595, "total_steps": 678, "loss": 0.861, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.30477601911671e-06, "epoch": 2.63, "percentage": 87.76, "elapsed_time": "0:52:34", "remaining_time": "0:07:20"}
125
+ {"current_steps": 600, "total_steps": 678, "loss": 0.9597, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.460511422441984e-06, "epoch": 2.65, "percentage": 88.5, "elapsed_time": "0:52:57", "remaining_time": "0:06:53"}
126
+ {"current_steps": 600, "total_steps": 678, "loss": null, "eval_loss": 1.0745042562484741, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 2.65, "percentage": 88.5, "elapsed_time": "0:52:57", "remaining_time": "0:06:53"}
127
+ {"current_steps": 605, "total_steps": 678, "loss": 0.7797, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.66645286155616e-06, "epoch": 2.67, "percentage": 89.23, "elapsed_time": "0:54:36", "remaining_time": "0:06:35"}
128
+ {"current_steps": 610, "total_steps": 678, "loss": 0.9222, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.923026536485875e-06, "epoch": 2.7, "percentage": 89.97, "elapsed_time": "0:55:00", "remaining_time": "0:06:07"}
129
+ {"current_steps": 615, "total_steps": 678, "loss": 0.9732, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.230631471100655e-06, "epoch": 2.72, "percentage": 90.71, "elapsed_time": "0:55:23", "remaining_time": "0:05:40"}
130
+ {"current_steps": 620, "total_steps": 678, "loss": 0.7366, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5896392989422377e-06, "epoch": 2.74, "percentage": 91.45, "elapsed_time": "0:55:47", "remaining_time": "0:05:13"}
131
+ {"current_steps": 625, "total_steps": 678, "loss": 0.9243, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.000394063755396e-06, "epoch": 2.76, "percentage": 92.18, "elapsed_time": "0:56:10", "remaining_time": "0:04:45"}
132
+ {"current_steps": 630, "total_steps": 678, "loss": 0.9263, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4632120348272003e-06, "epoch": 2.78, "percentage": 92.92, "elapsed_time": "0:56:34", "remaining_time": "0:04:18"}
133
+ {"current_steps": 635, "total_steps": 678, "loss": 0.8541, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9783815372338423e-06, "epoch": 2.81, "percentage": 93.66, "elapsed_time": "0:56:57", "remaining_time": "0:03:51"}
134
+ {"current_steps": 640, "total_steps": 678, "loss": 0.828, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5461627970860814e-06, "epoch": 2.83, "percentage": 94.4, "elapsed_time": "0:57:21", "remaining_time": "0:03:24"}
135
+ {"current_steps": 645, "total_steps": 678, "loss": 0.8446, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1667878018564171e-06, "epoch": 2.85, "percentage": 95.13, "elapsed_time": "0:57:45", "remaining_time": "0:02:57"}
136
+ {"current_steps": 650, "total_steps": 678, "loss": 0.7749, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.404601758630892e-07, "epoch": 2.87, "percentage": 95.87, "elapsed_time": "0:58:08", "remaining_time": "0:02:30"}
137
+ {"current_steps": 655, "total_steps": 678, "loss": 0.8738, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.673550709774267e-07, "epoch": 2.9, "percentage": 96.61, "elapsed_time": "0:58:32", "remaining_time": "0:02:03"}
138
+ {"current_steps": 660, "total_steps": 678, "loss": 0.895, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.4761907261356976e-07, "epoch": 2.92, "percentage": 97.35, "elapsed_time": "0:58:55", "remaining_time": "0:01:36"}
139
+ {"current_steps": 665, "total_steps": 678, "loss": 0.8552, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.813701210506946e-07, "epoch": 2.94, "percentage": 98.08, "elapsed_time": "0:59:18", "remaining_time": "0:01:09"}
140
+ {"current_steps": 670, "total_steps": 678, "loss": 0.8475, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.869744813023937e-08, "epoch": 2.96, "percentage": 98.82, "elapsed_time": "0:59:41", "remaining_time": "0:00:42"}
141
+ {"current_steps": 675, "total_steps": 678, "loss": 0.9076, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.661529361892907e-09, "epoch": 2.98, "percentage": 99.56, "elapsed_time": "1:00:04", "remaining_time": "0:00:16"}
142
+ {"current_steps": 678, "total_steps": 678, "loss": null, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 3.0, "percentage": 100.0, "elapsed_time": "1:00:18", "remaining_time": "0:00:00"}
143
+ {"current_steps": 114, "total_steps": 114, "loss": null, "eval_loss": 1.061107873916626, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 3.0, "percentage": 100.0, "elapsed_time": "1:01:33", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,888 @@
1
+ {
2
+ "best_metric": 1.061107873916626,
3
+ "best_model_checkpoint": "saves/LLaMA2-13B/lora/train_2024-01-03-02-43-22/checkpoint-400",
4
+ "epoch": 2.9966850828729283,
5
+ "eval_steps": 100,
6
+ "global_step": 678,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.02,
13
+ "learning_rate": 0.00019997316318671806,
14
+ "loss": 1.4828,
15
+ "step": 5
16
+ },
17
+ {
18
+ "epoch": 0.04,
19
+ "learning_rate": 0.00019989266715116316,
20
+ "loss": 1.1204,
21
+ "step": 10
22
+ },
23
+ {
24
+ "epoch": 0.07,
25
+ "learning_rate": 0.00019975855509847686,
26
+ "loss": 1.1449,
27
+ "step": 15
28
+ },
29
+ {
30
+ "epoch": 0.09,
31
+ "learning_rate": 0.00019957089901146148,
32
+ "loss": 1.1126,
33
+ "step": 20
34
+ },
35
+ {
36
+ "epoch": 0.11,
37
+ "learning_rate": 0.00019932979961194435,
38
+ "loss": 1.1048,
39
+ "step": 25
40
+ },
41
+ {
42
+ "epoch": 0.13,
43
+ "learning_rate": 0.0001990353863067169,
44
+ "loss": 1.2161,
45
+ "step": 30
46
+ },
47
+ {
48
+ "epoch": 0.15,
49
+ "learning_rate": 0.00019868781711807705,
50
+ "loss": 1.0051,
51
+ "step": 35
52
+ },
53
+ {
54
+ "epoch": 0.18,
55
+ "learning_rate": 0.00019828727859901317,
56
+ "loss": 1.0314,
57
+ "step": 40
58
+ },
59
+ {
60
+ "epoch": 0.2,
61
+ "learning_rate": 0.00019783398573307428,
62
+ "loss": 1.0718,
63
+ "step": 45
64
+ },
65
+ {
66
+ "epoch": 0.22,
67
+ "learning_rate": 0.00019732818181898045,
68
+ "loss": 1.1571,
69
+ "step": 50
70
+ },
71
+ {
72
+ "epoch": 0.24,
73
+ "learning_rate": 0.0001967701383400357,
74
+ "loss": 1.0602,
75
+ "step": 55
76
+ },
77
+ {
78
+ "epoch": 0.27,
79
+ "learning_rate": 0.0001961601548184129,
80
+ "loss": 1.1215,
81
+ "step": 60
82
+ },
83
+ {
84
+ "epoch": 0.29,
85
+ "learning_rate": 0.00019549855865438965,
86
+ "loss": 1.1434,
87
+ "step": 65
88
+ },
89
+ {
90
+ "epoch": 0.31,
91
+ "learning_rate": 0.00019478570495062037,
92
+ "loss": 1.0844,
93
+ "step": 70
94
+ },
95
+ {
96
+ "epoch": 0.33,
97
+ "learning_rate": 0.00019402197632153992,
98
+ "loss": 1.1123,
99
+ "step": 75
100
+ },
101
+ {
102
+ "epoch": 0.35,
103
+ "learning_rate": 0.00019320778268800066,
104
+ "loss": 1.1054,
105
+ "step": 80
106
+ },
107
+ {
108
+ "epoch": 0.38,
109
+ "learning_rate": 0.00019234356105725297,
110
+ "loss": 1.0368,
111
+ "step": 85
112
+ },
113
+ {
114
+ "epoch": 0.4,
115
+ "learning_rate": 0.00019142977528838762,
116
+ "loss": 1.1195,
117
+ "step": 90
118
+ },
119
+ {
120
+ "epoch": 0.42,
121
+ "learning_rate": 0.00019046691584336577,
122
+ "loss": 1.0778,
123
+ "step": 95
124
+ },
125
+ {
126
+ "epoch": 0.44,
127
+ "learning_rate": 0.0001894554995237703,
128
+ "loss": 1.0228,
129
+ "step": 100
130
+ },
131
+ {
132
+ "epoch": 0.44,
133
+ "eval_loss": 1.0887844562530518,
134
+ "eval_runtime": 75.527,
135
+ "eval_samples_per_second": 11.982,
136
+ "eval_steps_per_second": 1.509,
137
+ "step": 100
138
+ },
139
+ {
140
+ "epoch": 0.46,
141
+ "learning_rate": 0.0001883960691934196,
142
+ "loss": 1.1156,
143
+ "step": 105
144
+ },
145
+ {
146
+ "epoch": 0.49,
147
+ "learning_rate": 0.00018728919348699283,
148
+ "loss": 1.0259,
149
+ "step": 110
150
+ },
151
+ {
152
+ "epoch": 0.51,
153
+ "learning_rate": 0.00018613546650482322,
154
+ "loss": 1.0112,
155
+ "step": 115
156
+ },
157
+ {
158
+ "epoch": 0.53,
159
+ "learning_rate": 0.00018493550749402278,
160
+ "loss": 1.0279,
161
+ "step": 120
162
+ },
163
+ {
164
+ "epoch": 0.55,
165
+ "learning_rate": 0.00018368996051610986,
166
+ "loss": 1.105,
167
+ "step": 125
168
+ },
169
+ {
170
+ "epoch": 0.57,
171
+ "learning_rate": 0.00018239949410131802,
172
+ "loss": 1.041,
173
+ "step": 130
174
+ },
175
+ {
176
+ "epoch": 0.6,
177
+ "learning_rate": 0.00018106480088977172,
178
+ "loss": 1.0475,
179
+ "step": 135
180
+ },
181
+ {
182
+ "epoch": 0.62,
183
+ "learning_rate": 0.00017968659725972112,
184
+ "loss": 1.0241,
185
+ "step": 140
186
+ },
187
+ {
188
+ "epoch": 0.64,
189
+ "learning_rate": 0.00017826562294303585,
190
+ "loss": 1.0071,
191
+ "step": 145
192
+ },
193
+ {
194
+ "epoch": 0.66,
195
+ "learning_rate": 0.0001768026406281642,
196
+ "loss": 0.9714,
197
+ "step": 150
198
+ },
199
+ {
200
+ "epoch": 0.69,
201
+ "learning_rate": 0.00017529843555077066,
202
+ "loss": 1.0376,
203
+ "step": 155
204
+ },
205
+ {
206
+ "epoch": 0.71,
207
+ "learning_rate": 0.00017375381507227108,
208
+ "loss": 0.9806,
209
+ "step": 160
210
+ },
211
+ {
212
+ "epoch": 0.73,
213
+ "learning_rate": 0.00017216960824649303,
214
+ "loss": 1.1022,
215
+ "step": 165
216
+ },
217
+ {
218
+ "epoch": 0.75,
219
+ "learning_rate": 0.00017054666537469213,
220
+ "loss": 0.9723,
221
+ "step": 170
222
+ },
223
+ {
224
+ "epoch": 0.77,
225
+ "learning_rate": 0.00016888585754916476,
226
+ "loss": 1.1081,
227
+ "step": 175
228
+ },
229
+ {
230
+ "epoch": 0.8,
231
+ "learning_rate": 0.00016718807618570106,
232
+ "loss": 1.0525,
233
+ "step": 180
234
+ },
235
+ {
236
+ "epoch": 0.82,
237
+ "learning_rate": 0.00016545423254513004,
238
+ "loss": 1.0761,
239
+ "step": 185
240
+ },
241
+ {
242
+ "epoch": 0.84,
243
+ "learning_rate": 0.00016368525724421248,
244
+ "loss": 1.1493,
245
+ "step": 190
246
+ },
247
+ {
248
+ "epoch": 0.86,
249
+ "learning_rate": 0.00016188209975614542,
250
+ "loss": 0.9942,
251
+ "step": 195
252
+ },
253
+ {
254
+ "epoch": 0.88,
255
+ "learning_rate": 0.00016004572790094535,
256
+ "loss": 1.0068,
257
+ "step": 200
258
+ },
259
+ {
260
+ "epoch": 0.88,
261
+ "eval_loss": 1.0737706422805786,
262
+ "eval_runtime": 75.4895,
263
+ "eval_samples_per_second": 11.988,
264
+ "eval_steps_per_second": 1.51,
265
+ "step": 200
266
+ },
267
+ {
268
+ "epoch": 0.91,
269
+ "learning_rate": 0.00015817712732598413,
270
+ "loss": 1.0579,
271
+ "step": 205
272
+ },
273
+ {
274
+ "epoch": 0.93,
275
+ "learning_rate": 0.00015627730097695638,
276
+ "loss": 1.0985,
277
+ "step": 210
278
+ },
279
+ {
280
+ "epoch": 0.95,
281
+ "learning_rate": 0.00015434726855956206,
282
+ "loss": 1.071,
283
+ "step": 215
284
+ },
285
+ {
286
+ "epoch": 0.97,
287
+ "learning_rate": 0.00015238806599219336,
288
+ "loss": 1.0617,
289
+ "step": 220
290
+ },
291
+ {
292
+ "epoch": 0.99,
293
+ "learning_rate": 0.00015040074484992,
294
+ "loss": 0.9789,
295
+ "step": 225
296
+ },
297
+ {
298
+ "epoch": 1.02,
299
+ "learning_rate": 0.00014838637180007047,
300
+ "loss": 1.072,
301
+ "step": 230
302
+ },
303
+ {
304
+ "epoch": 1.04,
305
+ "learning_rate": 0.00014634602802971312,
306
+ "loss": 1.0098,
307
+ "step": 235
308
+ },
309
+ {
310
+ "epoch": 1.06,
311
+ "learning_rate": 0.00014428080866534396,
312
+ "loss": 1.0222,
313
+ "step": 240
314
+ },
315
+ {
316
+ "epoch": 1.08,
317
+ "learning_rate": 0.0001421918221850923,
318
+ "loss": 0.9734,
319
+ "step": 245
320
+ },
321
+ {
322
+ "epoch": 1.1,
323
+ "learning_rate": 0.00014008018982376044,
324
+ "loss": 0.9364,
325
+ "step": 250
326
+ },
327
+ {
328
+ "epoch": 1.13,
329
+ "learning_rate": 0.00013794704497101655,
330
+ "loss": 0.9735,
331
+ "step": 255
332
+ },
333
+ {
334
+ "epoch": 1.15,
335
+ "learning_rate": 0.00013579353256306287,
336
+ "loss": 1.0933,
337
+ "step": 260
338
+ },
339
+ {
340
+ "epoch": 1.17,
341
+ "learning_rate": 0.00013362080846810725,
342
+ "loss": 0.9764,
343
+ "step": 265
344
+ },
345
+ {
346
+ "epoch": 1.19,
347
+ "learning_rate": 0.00013143003886596669,
348
+ "loss": 1.0584,
349
+ "step": 270
350
+ },
351
+ {
352
+ "epoch": 1.22,
353
+ "learning_rate": 0.00012922239962213637,
354
+ "loss": 0.9116,
355
+ "step": 275
356
+ },
357
+ {
358
+ "epoch": 1.24,
359
+ "learning_rate": 0.00012699907565665982,
360
+ "loss": 1.0587,
361
+ "step": 280
362
+ },
363
+ {
364
+ "epoch": 1.26,
365
+ "learning_rate": 0.00012476126030813963,
366
+ "loss": 0.9342,
367
+ "step": 285
368
+ },
369
+ {
370
+ "epoch": 1.28,
371
+ "learning_rate": 0.00012251015469322916,
372
+ "loss": 0.9826,
373
+ "step": 290
374
+ },
375
+ {
376
+ "epoch": 1.3,
377
+ "learning_rate": 0.00012024696706194967,
378
+ "loss": 1.0656,
379
+ "step": 295
380
+ },
381
+ {
382
+ "epoch": 1.33,
383
+ "learning_rate": 0.00011797291214917881,
384
+ "loss": 1.0218,
385
+ "step": 300
386
+ },
387
+ {
388
+ "epoch": 1.33,
389
+ "eval_loss": 1.06796395778656,
390
+ "eval_runtime": 75.5412,
391
+ "eval_samples_per_second": 11.98,
392
+ "eval_steps_per_second": 1.509,
393
+ "step": 300
394
+ },
395
+ {
396
+ "epoch": 1.35,
397
+ "learning_rate": 0.00011568921052265836,
398
+ "loss": 0.9915,
399
+ "step": 305
400
+ },
401
+ {
402
+ "epoch": 1.37,
403
+ "learning_rate": 0.00011339708792787119,
404
+ "loss": 0.9592,
405
+ "step": 310
406
+ },
407
+ {
408
+ "epoch": 1.39,
409
+ "learning_rate": 0.00011109777463013915,
410
+ "loss": 0.9875,
411
+ "step": 315
412
+ },
413
+ {
414
+ "epoch": 1.41,
415
+ "learning_rate": 0.00010879250475429523,
416
+ "loss": 1.1583,
417
+ "step": 320
418
+ },
419
+ {
420
+ "epoch": 1.44,
421
+ "learning_rate": 0.00010648251562228386,
422
+ "loss": 0.977,
423
+ "step": 325
424
+ },
425
+ {
426
+ "epoch": 1.46,
427
+ "learning_rate": 0.00010416904708904548,
428
+ "loss": 1.041,
429
+ "step": 330
430
+ },
431
+ {
432
+ "epoch": 1.48,
433
+ "learning_rate": 0.00010185334087704124,
434
+ "loss": 0.972,
435
+ "step": 335
436
+ },
437
+ {
438
+ "epoch": 1.5,
439
+ "learning_rate": 9.953663990977568e-05,
440
+ "loss": 1.0115,
441
+ "step": 340
442
+ },
443
+ {
444
+ "epoch": 1.52,
445
+ "learning_rate": 9.722018764467461e-05,
446
+ "loss": 1.0078,
447
+ "step": 345
448
+ },
449
+ {
450
+ "epoch": 1.55,
451
+ "learning_rate": 9.490522740567633e-05,
452
+ "loss": 0.955,
453
+ "step": 350
454
+ },
455
+ {
456
+ "epoch": 1.57,
457
+ "learning_rate": 9.259300171589456e-05,
458
+ "loss": 0.9719,
459
+ "step": 355
460
+ },
461
+ {
462
+ "epoch": 1.59,
463
+ "learning_rate": 9.028475163071141e-05,
464
+ "loss": 0.871,
465
+ "step": 360
466
+ },
467
+ {
468
+ "epoch": 1.61,
469
+ "learning_rate": 8.798171607165778e-05,
470
+ "loss": 0.9568,
471
+ "step": 365
472
+ },
473
+ {
474
+ "epoch": 1.64,
475
+ "learning_rate": 8.568513116143919e-05,
476
+ "loss": 0.9466,
477
+ "step": 370
478
+ },
479
+ {
480
+ "epoch": 1.66,
481
+ "learning_rate": 8.339622956046417e-05,
482
+ "loss": 0.8967,
483
+ "step": 375
484
+ },
485
+ {
486
+ "epoch": 1.68,
487
+ "learning_rate": 8.111623980523035e-05,
488
+ "loss": 0.9465,
489
+ "step": 380
490
+ },
491
+ {
492
+ "epoch": 1.7,
493
+ "learning_rate": 7.884638564892472e-05,
494
+ "loss": 0.9499,
495
+ "step": 385
496
+ },
497
+ {
498
+ "epoch": 1.72,
499
+ "learning_rate": 7.658788540459062e-05,
500
+ "loss": 0.8955,
501
+ "step": 390
502
+ },
503
+ {
504
+ "epoch": 1.75,
505
+ "learning_rate": 7.434195129121518e-05,
506
+ "loss": 0.8982,
507
+ "step": 395
508
+ },
509
+ {
510
+ "epoch": 1.77,
511
+ "learning_rate": 7.210978878308729e-05,
512
+ "loss": 1.084,
513
+ "step": 400
514
+ },
515
+ {
516
+ "epoch": 1.77,
517
+ "eval_loss": 1.061107873916626,
518
+ "eval_runtime": 75.5635,
519
+ "eval_samples_per_second": 11.977,
520
+ "eval_steps_per_second": 1.509,
521
+ "step": 400
522
+ },
523
+ {
524
+ "epoch": 1.79,
525
+ "learning_rate": 6.989259596277582e-05,
526
+ "loss": 0.8936,
527
+ "step": 405
528
+ },
529
+ {
530
+ "epoch": 1.81,
531
+ "learning_rate": 6.76915628780754e-05,
532
+ "loss": 0.8794,
533
+ "step": 410
534
+ },
535
+ {
536
+ "epoch": 1.83,
537
+ "learning_rate": 6.55078709032644e-05,
538
+ "loss": 0.9631,
539
+ "step": 415
540
+ },
541
+ {
542
+ "epoch": 1.86,
543
+ "learning_rate": 6.334269210501875e-05,
544
+ "loss": 0.9636,
545
+ "step": 420
546
+ },
547
+ {
548
+ "epoch": 1.88,
549
+ "learning_rate": 6.119718861332098e-05,
550
+ "loss": 0.9761,
551
+ "step": 425
552
+ },
553
+ {
554
+ "epoch": 1.9,
555
+ "learning_rate": 5.9072511997703226e-05,
556
+ "loss": 0.9732,
557
+ "step": 430
558
+ },
559
+ {
560
+ "epoch": 1.92,
561
+ "learning_rate": 5.696980264915777e-05,
562
+ "loss": 0.995,
563
+ "step": 435
564
+ },
565
+ {
566
+ "epoch": 1.94,
567
+ "learning_rate": 5.489018916804813e-05,
568
+ "loss": 1.0037,
569
+ "step": 440
570
+ },
571
+ {
572
+ "epoch": 1.97,
573
+ "learning_rate": 5.283478775834811e-05,
574
+ "loss": 0.8503,
575
+ "step": 445
576
+ },
577
+ {
578
+ "epoch": 1.99,
579
+ "learning_rate": 5.080470162853472e-05,
580
+ "loss": 0.8914,
581
+ "step": 450
582
+ },
583
+ {
584
+ "epoch": 2.01,
585
+ "learning_rate": 4.880102039945624e-05,
586
+ "loss": 0.9531,
587
+ "step": 455
588
+ },
589
+ {
590
+ "epoch": 2.03,
591
+ "learning_rate": 4.6824819519493057e-05,
592
+ "loss": 0.9294,
593
+ "step": 460
594
+ },
595
+ {
596
+ "epoch": 2.06,
597
+ "learning_rate": 4.487715968732568e-05,
598
+ "loss": 0.8584,
599
+ "step": 465
600
+ },
601
+ {
602
+ "epoch": 2.08,
603
+ "learning_rate": 4.29590862826191e-05,
604
+ "loss": 0.9285,
605
+ "step": 470
606
+ },
607
+ {
608
+ "epoch": 2.1,
609
+ "learning_rate": 4.107162880492984e-05,
610
+ "loss": 0.8742,
611
+ "step": 475
612
+ },
613
+ {
614
+ "epoch": 2.12,
615
+ "learning_rate": 3.921580032113602e-05,
616
+ "loss": 0.7903,
617
+ "step": 480
618
+ },
619
+ {
620
+ "epoch": 2.14,
621
+ "learning_rate": 3.739259692168764e-05,
622
+ "loss": 0.8287,
623
+ "step": 485
624
+ },
625
+ {
626
+ "epoch": 2.17,
627
+ "learning_rate": 3.560299718596889e-05,
628
+ "loss": 0.8967,
629
+ "step": 490
630
+ },
631
+ {
632
+ "epoch": 2.19,
633
+ "learning_rate": 3.3847961657058845e-05,
634
+ "loss": 0.841,
635
+ "step": 495
636
+ },
637
+ {
638
+ "epoch": 2.21,
639
+ "learning_rate": 3.212843232617343e-05,
640
+ "loss": 0.897,
641
+ "step": 500
642
+ },
643
+ {
644
+ "epoch": 2.21,
645
+ "eval_loss": 1.0756696462631226,
646
+ "eval_runtime": 75.5409,
647
+ "eval_samples_per_second": 11.98,
648
+ "eval_steps_per_second": 1.509,
649
+ "step": 500
650
+ },
651
+ {
652
+ "epoch": 2.23,
653
+ "learning_rate": 3.0445332127064275e-05,
654
+ "loss": 0.9455,
655
+ "step": 505
656
+ },
657
+ {
658
+ "epoch": 2.25,
659
+ "learning_rate": 2.879956444064703e-05,
660
+ "loss": 0.9292,
661
+ "step": 510
662
+ },
663
+ {
664
+ "epoch": 2.28,
665
+ "learning_rate": 2.7192012610123774e-05,
666
+ "loss": 0.9016,
667
+ "step": 515
668
+ },
669
+ {
670
+ "epoch": 2.3,
671
+ "learning_rate": 2.5623539466860813e-05,
672
+ "loss": 0.937,
673
+ "step": 520
674
+ },
675
+ {
676
+ "epoch": 2.32,
677
+ "learning_rate": 2.409498686727587e-05,
678
+ "loss": 0.8348,
679
+ "step": 525
680
+ },
681
+ {
682
+ "epoch": 2.34,
683
+ "learning_rate": 2.2607175240983026e-05,
684
+ "loss": 0.8387,
685
+ "step": 530
686
+ },
687
+ {
688
+ "epoch": 2.36,
689
+ "learning_rate": 2.1160903150438605e-05,
690
+ "loss": 0.7736,
691
+ "step": 535
692
+ },
693
+ {
694
+ "epoch": 2.39,
695
+ "learning_rate": 1.9756946862323535e-05,
696
+ "loss": 0.8672,
697
+ "step": 540
698
+ },
699
+ {
700
+ "epoch": 2.41,
701
+ "learning_rate": 1.839605993089307e-05,
702
+ "loss": 0.8688,
703
+ "step": 545
704
+ },
705
+ {
706
+ "epoch": 2.43,
707
+ "learning_rate": 1.707897279351671e-05,
708
+ "loss": 0.8631,
709
+ "step": 550
710
+ },
711
+ {
712
+ "epoch": 2.45,
713
+ "learning_rate": 1.580639237862608e-05,
714
+ "loss": 0.9171,
715
+ "step": 555
716
+ },
717
+ {
718
+ "epoch": 2.48,
719
+ "learning_rate": 1.4579001726280828e-05,
720
+ "loss": 0.9134,
721
+ "step": 560
722
+ },
723
+ {
724
+ "epoch": 2.5,
725
+ "learning_rate": 1.339745962155613e-05,
726
+ "loss": 0.8363,
727
+ "step": 565
728
+ },
729
+ {
730
+ "epoch": 2.52,
731
+ "learning_rate": 1.2262400240949023e-05,
732
+ "loss": 0.8862,
733
+ "step": 570
734
+ },
735
+ {
736
+ "epoch": 2.54,
737
+ "learning_rate": 1.1174432811992685e-05,
738
+ "loss": 0.9154,
739
+ "step": 575
740
+ },
741
+ {
742
+ "epoch": 2.56,
743
+ "learning_rate": 1.013414128626211e-05,
744
+ "loss": 0.9423,
745
+ "step": 580
746
+ },
747
+ {
748
+ "epoch": 2.59,
749
+ "learning_rate": 9.142084025945984e-06,
750
+ "loss": 0.8462,
751
+ "step": 585
752
+ },
753
+ {
754
+ "epoch": 2.61,
755
+ "learning_rate": 8.19879350415349e-06,
756
+ "loss": 0.8019,
757
+ "step": 590
758
+ },
759
+ {
760
+ "epoch": 2.63,
761
+ "learning_rate": 7.30477601911671e-06,
762
+ "loss": 0.861,
763
+ "step": 595
764
+ },
765
+ {
766
+ "epoch": 2.65,
767
+ "learning_rate": 6.460511422441984e-06,
768
+ "loss": 0.9597,
769
+ "step": 600
770
+ },
771
+ {
772
+ "epoch": 2.65,
773
+ "eval_loss": 1.0745042562484741,
774
+ "eval_runtime": 75.5387,
775
+ "eval_samples_per_second": 11.981,
776
+ "eval_steps_per_second": 1.509,
777
+ "step": 600
778
+ },
779
+ {
780
+ "epoch": 2.67,
781
+ "learning_rate": 5.66645286155616e-06,
782
+ "loss": 0.7797,
783
+ "step": 605
784
+ },
785
+ {
786
+ "epoch": 2.7,
787
+ "learning_rate": 4.923026536485875e-06,
788
+ "loss": 0.9222,
789
+ "step": 610
790
+ },
791
+ {
792
+ "epoch": 2.72,
793
+ "learning_rate": 4.230631471100655e-06,
794
+ "loss": 0.9732,
795
+ "step": 615
796
+ },
797
+ {
798
+ "epoch": 2.74,
799
+ "learning_rate": 3.5896392989422377e-06,
800
+ "loss": 0.7366,
801
+ "step": 620
802
+ },
803
+ {
804
+ "epoch": 2.76,
805
+ "learning_rate": 3.000394063755396e-06,
806
+ "loss": 0.9243,
807
+ "step": 625
808
+ },
809
+ {
810
+ "epoch": 2.78,
811
+ "learning_rate": 2.4632120348272003e-06,
812
+ "loss": 0.9263,
813
+ "step": 630
814
+ },
815
+ {
816
+ "epoch": 2.81,
817
+ "learning_rate": 1.9783815372338423e-06,
818
+ "loss": 0.8541,
819
+ "step": 635
820
+ },
821
+ {
822
+ "epoch": 2.83,
823
+ "learning_rate": 1.5461627970860814e-06,
824
+ "loss": 0.828,
825
+ "step": 640
826
+ },
827
+ {
828
+ "epoch": 2.85,
829
+ "learning_rate": 1.1667878018564171e-06,
830
+ "loss": 0.8446,
831
+ "step": 645
832
+ },
833
+ {
834
+ "epoch": 2.87,
835
+ "learning_rate": 8.404601758630892e-07,
836
+ "loss": 0.7749,
837
+ "step": 650
838
+ },
839
+ {
840
+ "epoch": 2.9,
841
+ "learning_rate": 5.673550709774267e-07,
842
+ "loss": 0.8738,
843
+ "step": 655
844
+ },
845
+ {
846
+ "epoch": 2.92,
847
+ "learning_rate": 3.4761907261356976e-07,
848
+ "loss": 0.895,
849
+ "step": 660
850
+ },
851
+ {
852
+ "epoch": 2.94,
853
+ "learning_rate": 1.813701210506946e-07,
854
+ "loss": 0.8552,
855
+ "step": 665
856
+ },
857
+ {
858
+ "epoch": 2.96,
859
+ "learning_rate": 6.869744813023937e-08,
860
+ "loss": 0.8475,
861
+ "step": 670
862
+ },
863
+ {
864
+ "epoch": 2.98,
865
+ "learning_rate": 9.661529361892907e-09,
866
+ "loss": 0.9076,
867
+ "step": 675
868
+ },
869
+ {
870
+ "epoch": 3.0,
871
+ "step": 678,
872
+ "total_flos": 1.5685942606774272e+17,
873
+ "train_loss": 0.9757168982232918,
874
+ "train_runtime": 3618.1154,
875
+ "train_samples_per_second": 3.0,
876
+ "train_steps_per_second": 0.187
877
+ }
878
+ ],
879
+ "logging_steps": 5,
880
+ "max_steps": 678,
881
+ "num_input_tokens_seen": 0,
882
+ "num_train_epochs": 3,
883
+ "save_steps": 100,
884
+ "total_flos": 1.5685942606774272e+17,
885
+ "train_batch_size": 4,
886
+ "trial_name": null,
887
+ "trial_params": null
888
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8f3fda749ef742620436ea1dc1427c111735aa4ba53fdc18fb8f38a62d141f0
+ size 4920