dimasik87 committed on
Commit
4d364fc
1 Parent(s): fed2121

Training in progress, step 16, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b776cef4a8b110f29f03f1f2a505ab26816d15755d261cab864eced6635967f
+oid sha256:020c0fb13a58b5560f02c8bc234c279da102ee2eb7f298e1a83031fe8ed77a9d
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:db7cd6507b3e6336497c085dcdbab5771e4bc873fa652e44ade43922d4c8c091
+oid sha256:10233c45644298c18560fe20a01ae5677c0810ffb56603956549cd685a5b636e
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d24904fc57fd2178e69b645251775fb3fd05ea113003861d10791be78a06f1a9
+oid sha256:3d2ed7bb47263863992b4cee8254a6b09097065ba424f1725823e1454e5ac93b
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:68888158764ed5e658b457a541f86335ea31432325308674d2962aa98e037fa4
+oid sha256:5b52924c88c1c80163d374a6650253dd74b8b46052a3da6fc5e4076e57b5effa
 size 1064
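
The four files above are tracked with Git LFS, so each diff only changes a three-line pointer file (version / oid sha256 / size) while the checkpoint blobs themselves live in LFS storage. As an aside, a minimal Python sketch for checking a downloaded blob against such a pointer; it assumes only the pointer format shown above, and the paths in the example are illustrative, not part of this commit.

import hashlib

def parse_lfs_pointer(path):
    # An LFS pointer has three lines: "version <url>", "oid sha256:<hex>", "size <bytes>".
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields["oid"].split(":", 1)[1], int(fields["size"])

def verify_blob(pointer_path, blob_path):
    # Hash the downloaded blob in 1 MiB chunks and compare against the recorded oid and size.
    expected_oid, expected_size = parse_lfs_pointer(pointer_path)
    digest, total = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            total += len(chunk)
    return digest.hexdigest() == expected_oid and total == expected_size

# Example (hypothetical local paths):
# verify_blob("last-checkpoint/optimizer.pt", "/tmp/optimizer.pt")
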
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.03230148048452221,
+  "epoch": 0.04306864064602961,
   "eval_steps": 4,
-  "global_step": 12,
+  "global_step": 16,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -123,6 +123,42 @@
       "eval_samples_per_second": 7.844,
       "eval_steps_per_second": 7.844,
       "step": 12
+    },
+    {
+      "epoch": 0.034993270524899055,
+      "grad_norm": 1.4582064151763916,
+      "learning_rate": 0.00019723699203976766,
+      "loss": 0.3575,
+      "step": 13
+    },
+    {
+      "epoch": 0.03768506056527591,
+      "grad_norm": 1.6408145427703857,
+      "learning_rate": 0.00019510565162951537,
+      "loss": 0.4609,
+      "step": 14
+    },
+    {
+      "epoch": 0.040376850605652756,
+      "grad_norm": 1.7364938259124756,
+      "learning_rate": 0.0001923879532511287,
+      "loss": 0.4607,
+      "step": 15
+    },
+    {
+      "epoch": 0.04306864064602961,
+      "grad_norm": 2.5827343463897705,
+      "learning_rate": 0.0001891006524188368,
+      "loss": 0.5915,
+      "step": 16
+    },
+    {
+      "epoch": 0.04306864064602961,
+      "eval_loss": 0.29014351963996887,
+      "eval_runtime": 10.1162,
+      "eval_samples_per_second": 7.809,
+      "eval_steps_per_second": 7.809,
+      "step": 16
     }
   ],
   "logging_steps": 1,
@@ -142,7 +178,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 5007739164033024.0,
+  "total_flos": 6584249641598976.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
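
For reference, a minimal sketch of reading the updated trainer_state.json and pulling out the logged values, assuming the usual Hugging Face Trainer layout in which the entries added above sit in a top-level "log_history" list:

import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("global_step:", state["global_step"], "epoch:", state["epoch"])

# Per-step training losses (logging_steps = 1) and periodic eval losses (eval_steps = 4).
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("last train loss:", train_log[-1])   # e.g. (16, 0.5915) for this checkpoint
print("last eval loss:", eval_log[-1])     # e.g. (16, 0.29014351963996887)
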