ardaspear committed (verified)
Commit 9727fd5 · Parent: 7cce5f1

Training in progress, step 25, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:15534d3755a6a4e5e30be58ff2c4a2167b06163faa57b68008187ad1d7497fd7
+ oid sha256:3aa99e7cf2c3076f5ba88203146caeafb7c3c3e471219a98518a8ae34ea3bac0
  size 645975704
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6489f32e9d976acd503d0b7c43cb423d90e76a6dcb755e2bf8df2eb2ca94cc0c
+ oid sha256:f808c01192174f4f98f7f0269a5c6044231c9e8c5247c1824b0f10192f14b6fd
  size 328468404
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:842160c84da2f20ba4769075d37abcad1cd79e1e28006529f9f33b1c2157a9d4
+ oid sha256:9e6d5121af4ad8437561e7d254c21798ee487271bfae0bf79a1a5a2f43ee4420
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:02d97e2af7e51ba89efcffc7f2a661ca0885e61db7f78987db5df30514ab62df
+ oid sha256:6676fe28230ae15b45fb334c871c6fdf1a7984a935952b9f8650896c37a8c106
  size 1064
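
The four checkpoint binaries above (adapter_model.safetensors, optimizer.pt, rng_state.pth, scheduler.pt) are stored through Git LFS, so this commit only rewrites their pointer files: the version line, the oid sha256, and the size. A minimal sketch, assuming the files have been downloaded locally under last-checkpoint/ (path assumed, not part of this commit), of how one might verify a downloaded file against the oid and size recorded in its pointer:

import hashlib

# Minimal verification sketch (not part of this repo): compare a local file
# against the oid/size of its Git LFS pointer, here the new scheduler.pt
# pointer from this commit.
EXPECTED_OID = "6676fe28230ae15b45fb334c871c6fdf1a7984a935952b9f8650896c37a8c106"
EXPECTED_SIZE = 1064
LOCAL_PATH = "last-checkpoint/scheduler.pt"  # assumed local download path

h = hashlib.sha256()
size = 0
with open(LOCAL_PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

print("oid matches: ", h.hexdigest() == EXPECTED_OID)
print("size matches:", size == EXPECTED_SIZE)
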
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.0018657369077742926,
+ "epoch": 0.0023321711347178654,
  "eval_steps": 5,
- "global_step": 20,
+ "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -89,6 +89,28 @@
  "eval_samples_per_second": 16.013,
  "eval_steps_per_second": 4.003,
  "step": 20
+ },
+ {
+ "epoch": 0.001959023753163007,
+ "grad_norm": 3.2567591667175293,
+ "learning_rate": 8.247240241650918e-05,
+ "loss": 1.0721,
+ "step": 21
+ },
+ {
+ "epoch": 0.002238884289329151,
+ "grad_norm": 2.1846158504486084,
+ "learning_rate": 7.269952498697734e-05,
+ "loss": 0.9161,
+ "step": 24
+ },
+ {
+ "epoch": 0.0023321711347178654,
+ "eval_loss": 0.9646629691123962,
+ "eval_runtime": 1127.0671,
+ "eval_samples_per_second": 16.019,
+ "eval_steps_per_second": 4.005,
+ "step": 25
  }
  ],
  "logging_steps": 3,
@@ -108,7 +130,7 @@
  "attributes": {}
  }
  },
- "total_flos": 2.4438711779328e+16,
+ "total_flos": 3.048173869203456e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null