ardaspear committed on
Commit 697291f · verified · 1 Parent(s): fce64b2

Training in progress, step 30, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:93c9beac45fa92f4c1db6ebc60286c832d9a4ef6548b0f4bf82825b5caf5f682
+oid sha256:d9e1fc52df00af6f86bf334d17420bdc509fe71996afa01336eacfd633c3a683
 size 2373352
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a9949281865d7646ee69784eff59b573b03bdd5cbc01c30e8e046ae3c6a870a4
+oid sha256:9e99c8d5762ea8dc4877e91ceeb01f13fd3f27d53a22eb3e0a0533bf0fe2f459
 size 4830714
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2504f02dae5324e88c0367fb7870bab683b94c85f27b7a080401f4febb52ab77
+oid sha256:d1af2d1ad71d0a525efd297d63ece1800652033d73e1aabfc1096f4a3269303a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6676fe28230ae15b45fb334c871c6fdf1a7984a935952b9f8650896c37a8c106
+oid sha256:d37b0dad6b9c48822da5c83a071d50252502799ad22c4c4907147ad4e4f8e2f4
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.014738393515106854,
+  "epoch": 0.017686072218128224,
   "eval_steps": 5,
-  "global_step": 25,
+  "global_step": 30,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -111,6 +111,28 @@
       "eval_samples_per_second": 133.695,
       "eval_steps_per_second": 16.753,
       "step": 25
+    },
+    {
+      "epoch": 0.0159174649963154,
+      "grad_norm": 0.444751113653183,
+      "learning_rate": 6.167226819279528e-05,
+      "loss": 11.8809,
+      "step": 27
+    },
+    {
+      "epoch": 0.017686072218128224,
+      "grad_norm": 0.4151949882507324,
+      "learning_rate": 5e-05,
+      "loss": 11.8796,
+      "step": 30
+    },
+    {
+      "epoch": 0.017686072218128224,
+      "eval_loss": 11.876304626464844,
+      "eval_runtime": 21.2507,
+      "eval_samples_per_second": 134.442,
+      "eval_steps_per_second": 16.846,
+      "step": 30
     }
   ],
   "logging_steps": 3,
@@ -130,7 +152,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 27544780800000.0,
+  "total_flos": 33053736960000.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null