dimasik87 committed
Commit e712d9c
1 Parent(s): a88ca00

Training in progress, step 20, checkpoint

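The commit message "Training in progress, step 20, checkpoint" looks like the message the transformers Trainer emits when it pushes an intermediate checkpoint to the Hub. Below is a minimal sketch of a configuration that would produce commits like this one; the batch size and the eval/logging cadence come from trainer_state.json in this diff, while the output directory, save cadence, learning rate, scheduler, warmup and step budget are assumptions for illustration only.

from transformers import TrainingArguments

# Hypothetical configuration producing "Training in progress, step N, checkpoint"
# commits. Values marked "from trainer_state.json" appear in this diff; everything
# else is an assumption.
args = TrainingArguments(
    output_dir="outputs",              # assumption
    per_device_train_batch_size=1,     # from trainer_state.json: "train_batch_size": 1
    logging_steps=1,                   # from trainer_state.json: "logging_steps": 1
    eval_strategy="steps",             # evaluation_strategy in older transformers
    eval_steps=4,                      # from trainer_state.json: "eval_steps": 4
    save_strategy="steps",
    save_steps=4,                      # assumption: the parent commit was at step 16
    learning_rate=2e-4,                # assumption inferred from the logged LRs
    lr_scheduler_type="cosine",        # assumption inferred from the logged LRs
    warmup_steps=10,                   # assumption inferred from the logged LRs
    max_steps=50,                      # assumption inferred from the logged LRs
    push_to_hub=True,                  # pushes each saved checkpoint as a commit
)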
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:020c0fb13a58b5560f02c8bc234c279da102ee2eb7f298e1a83031fe8ed77a9d
+oid sha256:d8373c5e4c8ca9133fbe3ff268192eb46706f3a841ef92d81816a0577e1744f3
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:10233c45644298c18560fe20a01ae5677c0810ffb56603956549cd685a5b636e
+oid sha256:0a134857514fb9dfe5e2f59d16b28f578292d717fe3548b42257a0cb8f318fcf
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3d2ed7bb47263863992b4cee8254a6b09097065ba424f1725823e1454e5ac93b
+oid sha256:5607b27ad36e8b8b6a123386b38e0716be53aa03eb9581fdcae093ba333e0d68
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5b52924c88c1c80163d374a6650253dd74b8b46052a3da6fc5e4076e57b5effa
+oid sha256:5d605401690d7669ff16aeaca6820cbd8d0d605afe748c51045ce90888810a22
 size 1064
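Each of the four files above is stored through Git LFS, so the diff only shows the pointer file: the sha256 oid and byte size of the new object. Below is a minimal sketch, assuming the checkpoint has been downloaded locally under last-checkpoint/, that verifies a downloaded file against its pointer.

import hashlib
import os

def verify_lfs_object(local_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded LFS object against the oid/size from its pointer file."""
    if os.path.getsize(local_path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

# oid and size taken from the updated adapter_model.safetensors pointer above;
# the local path is a placeholder for wherever the checkpoint was downloaded.
ok = verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    "d8373c5e4c8ca9133fbe3ff268192eb46706f3a841ef92d81816a0577e1744f3",
    167832240,
)
print("adapter matches pointer:", ok)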
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.04306864064602961,
+  "epoch": 0.05383580080753701,
   "eval_steps": 4,
-  "global_step": 16,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -159,6 +159,42 @@
       "eval_samples_per_second": 7.809,
       "eval_steps_per_second": 7.809,
       "step": 16
+    },
+    {
+      "epoch": 0.04576043068640646,
+      "grad_norm": 1.0929858684539795,
+      "learning_rate": 0.00018526401643540922,
+      "loss": 0.2242,
+      "step": 17
+    },
+    {
+      "epoch": 0.04845222072678331,
+      "grad_norm": 1.4121438264846802,
+      "learning_rate": 0.00018090169943749476,
+      "loss": 0.2983,
+      "step": 18
+    },
+    {
+      "epoch": 0.05114401076716016,
+      "grad_norm": 0.9734262824058533,
+      "learning_rate": 0.0001760405965600031,
+      "loss": 0.3084,
+      "step": 19
+    },
+    {
+      "epoch": 0.05383580080753701,
+      "grad_norm": 0.9649080038070679,
+      "learning_rate": 0.00017071067811865476,
+      "loss": 0.2583,
+      "step": 20
+    },
+    {
+      "epoch": 0.05383580080753701,
+      "eval_loss": 0.28261926770210266,
+      "eval_runtime": 10.3045,
+      "eval_samples_per_second": 7.667,
+      "eval_steps_per_second": 7.667,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -178,7 +214,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6584249641598976.0,
+  "total_flos": 8068024208719872.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null