dimasik87 committed on
Commit c9e0716
1 Parent(s): a641af6

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a9bcad8bdceb953e4041a2446f3ca0ac69dfd62bdc88a7053d8ccd908a1a7274
+oid sha256:6c6338372961508d0e6e53f4b425dedb3b1d009925d6beed6c8fe137d15a8822
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b90c81f387e715b9f6d842a12d0ffcc53c28bfdd7183a5b426bb29a014eb37be
+oid sha256:c43bc5142208879f6c21142d760abc96f24db666fb369b9d6d73834b699e85a2
 size 168149074
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9912bf78e7cc968894815632c1e4fcd3ff686a58ca2e14070632815f6c275f7f
+oid sha256:ca4ef0be595a40805daa15596cdef4ec088c37a282b79824e24568848d376f7f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
+oid sha256:5d605401690d7669ff16aeaca6820cbd8d0d605afe748c51045ce90888810a22
 size 1064
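
All four checkpoint files above are stored through Git LFS, so each diff only swaps the pointer contents: the sha256 oid of the new blob plus its byte size, with the sizes unchanged between step 10 and step 20. Below is a minimal verification sketch, assuming the checkpoint has already been pulled locally (e.g. via `git lfs pull`) so the real binaries sit under `last-checkpoint/`; the helper name is mine, not part of the repo.

```python
import hashlib
import os

def verify_lfs_pointer(local_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size recorded in its Git LFS pointer."""
    if os.path.getsize(local_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(local_path, "rb") as f:
        # Stream in 1 MiB chunks so large checkpoints do not have to fit in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values taken from the new adapter_model.safetensors pointer above.
ok = verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "6c6338372961508d0e6e53f4b425dedb3b1d009925d6beed6c8fe137d15a8822",
    83945296,
)
print("pointer matches" if ok else "pointer mismatch")
```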
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0001693870939738424,
+  "epoch": 0.0003387741879476848,
   "eval_steps": 5,
-  "global_step": 10,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -101,6 +101,92 @@
       "eval_samples_per_second": 9.669,
       "eval_steps_per_second": 4.835,
       "step": 10
+    },
+    {
+      "epoch": 0.00018632580337122663,
+      "grad_norm": 23.243389129638672,
+      "learning_rate": 0.0001996917333733128,
+      "loss": 17.2822,
+      "step": 11
+    },
+    {
+      "epoch": 0.00020326451276861087,
+      "grad_norm": 23.551620483398438,
+      "learning_rate": 0.00019876883405951377,
+      "loss": 16.8668,
+      "step": 12
+    },
+    {
+      "epoch": 0.0002202032221659951,
+      "grad_norm": 17.5562686920166,
+      "learning_rate": 0.00019723699203976766,
+      "loss": 15.4958,
+      "step": 13
+    },
+    {
+      "epoch": 0.00023714193156337935,
+      "grad_norm": 25.39556312561035,
+      "learning_rate": 0.00019510565162951537,
+      "loss": 17.6328,
+      "step": 14
+    },
+    {
+      "epoch": 0.0002540806409607636,
+      "grad_norm": 17.877641677856445,
+      "learning_rate": 0.0001923879532511287,
+      "loss": 15.1113,
+      "step": 15
+    },
+    {
+      "epoch": 0.0002540806409607636,
+      "eval_loss": 2.129117488861084,
+      "eval_runtime": 5125.7738,
+      "eval_samples_per_second": 9.699,
+      "eval_steps_per_second": 4.85,
+      "step": 15
+    },
+    {
+      "epoch": 0.0002710193503581478,
+      "grad_norm": 30.318897247314453,
+      "learning_rate": 0.0001891006524188368,
+      "loss": 17.8594,
+      "step": 16
+    },
+    {
+      "epoch": 0.00028795805975553206,
+      "grad_norm": 15.739501953125,
+      "learning_rate": 0.00018526401643540922,
+      "loss": 17.0346,
+      "step": 17
+    },
+    {
+      "epoch": 0.0003048967691529163,
+      "grad_norm": 24.859498977661133,
+      "learning_rate": 0.00018090169943749476,
+      "loss": 18.724,
+      "step": 18
+    },
+    {
+      "epoch": 0.00032183547855030054,
+      "grad_norm": 16.235403060913086,
+      "learning_rate": 0.0001760405965600031,
+      "loss": 15.4015,
+      "step": 19
+    },
+    {
+      "epoch": 0.0003387741879476848,
+      "grad_norm": 15.718273162841797,
+      "learning_rate": 0.00017071067811865476,
+      "loss": 17.4672,
+      "step": 20
+    },
+    {
+      "epoch": 0.0003387741879476848,
+      "eval_loss": 2.085993766784668,
+      "eval_runtime": 5126.7679,
+      "eval_samples_per_second": 9.697,
+      "eval_steps_per_second": 4.849,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -120,7 +206,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.402135828758528e+16,
+  "total_flos": 2.804271657517056e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null