sunitha-ravi committed on
Commit
d8aea7e
1 Parent(s): 97e602e

Upload 4 files

Files changed (4)
  1. rng_state.pth +3 -0
  2. scheduler.pt +3 -0
  3. trainer_state.json +243 -0
  4. training_args.bin +3 -0
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec4c5fbd9a45011bb39fcec2f815f03af2d91aa99d68f3d16748ba6a7e2e9a0a
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce9e6eb1b18a7414a8509bc17f557c4aafa4a36bd93bb7af610b947cf7335cf2
+ size 627
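
Note: rng_state.pth and scheduler.pt are committed as Git LFS pointer files; each pointer records only the spec version, the sha256 oid of the real object, and its size in bytes. Below is a minimal sketch of checking a fetched blob against its pointer metadata; the two local paths are hypothetical and assume the actual binary has already been pulled (e.g. with git lfs pull).

import hashlib
import os

def verify_lfs_pointer(pointer_path, blob_path):
    # Parse the "key value" lines of a git-lfs spec/v1 pointer file.
    fields = dict(line.strip().split(" ", 1) for line in open(pointer_path) if line.strip())
    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex digest>" -> hex digest
    expected_size = int(fields["size"])

    # Hash the fetched blob and compare digest and size against the pointer.
    sha = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return sha.hexdigest() == expected_oid and os.path.getsize(blob_path) == expected_size

# Hypothetical paths: the pointer as committed vs. the fetched 14575-byte blob.
print(verify_lfs_pointer("rng_state.pth.pointer", "rng_state.pth"))
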
trainer_state.json ADDED
@@ -0,0 +1,243 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 4.344048653344918,
+ "eval_steps": 500,
+ "global_step": 15000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.1448016217781639,
+ "grad_norm": 26.427837371826172,
+ "learning_rate": 5e-05,
+ "loss": 0.4824,
+ "step": 500
+ },
+ {
+ "epoch": 0.2896032435563278,
+ "grad_norm": 3.53263258934021,
+ "learning_rate": 4.850879809126156e-05,
+ "loss": 0.3412,
+ "step": 1000
+ },
+ {
+ "epoch": 0.43440486533449174,
+ "grad_norm": 1.9650359153747559,
+ "learning_rate": 4.7017596182523116e-05,
+ "loss": 0.3352,
+ "step": 1500
+ },
+ {
+ "epoch": 0.5792064871126557,
+ "grad_norm": 0.11715813726186752,
+ "learning_rate": 4.552639427378467e-05,
+ "loss": 0.2622,
+ "step": 2000
+ },
+ {
+ "epoch": 0.7240081088908196,
+ "grad_norm": 5.380560398101807,
+ "learning_rate": 4.403519236504623e-05,
+ "loss": 0.2591,
+ "step": 2500
+ },
+ {
+ "epoch": 0.8688097306689835,
+ "grad_norm": 0.3751641511917114,
+ "learning_rate": 4.2543990456307786e-05,
+ "loss": 0.2633,
+ "step": 3000
+ },
+ {
+ "epoch": 1.0136113524471475,
+ "grad_norm": 0.22207745909690857,
+ "learning_rate": 4.105278854756934e-05,
+ "loss": 0.2213,
+ "step": 3500
+ },
+ {
+ "epoch": 1.1584129742253113,
+ "grad_norm": 0.18233709037303925,
+ "learning_rate": 3.95615866388309e-05,
+ "loss": 0.2005,
+ "step": 4000
+ },
+ {
+ "epoch": 1.3032145960034751,
+ "grad_norm": 4.843414783477783,
+ "learning_rate": 3.8070384730092456e-05,
+ "loss": 0.1835,
+ "step": 4500
+ },
+ {
+ "epoch": 1.4480162177816391,
+ "grad_norm": 3.538057565689087,
+ "learning_rate": 3.657918282135401e-05,
+ "loss": 0.183,
+ "step": 5000
+ },
+ {
+ "epoch": 1.5928178395598032,
+ "grad_norm": 3.140592575073242,
+ "learning_rate": 3.508798091261557e-05,
+ "loss": 0.1718,
+ "step": 5500
+ },
+ {
+ "epoch": 1.737619461337967,
+ "grad_norm": 4.233497142791748,
+ "learning_rate": 3.3596779003877126e-05,
+ "loss": 0.1891,
+ "step": 6000
+ },
+ {
+ "epoch": 1.8824210831161308,
+ "grad_norm": 0.30157846212387085,
+ "learning_rate": 3.210557709513868e-05,
+ "loss": 0.176,
+ "step": 6500
+ },
+ {
+ "epoch": 2.027222704894295,
+ "grad_norm": 0.030697572976350784,
+ "learning_rate": 3.061437518640024e-05,
+ "loss": 0.1605,
+ "step": 7000
+ },
+ {
+ "epoch": 2.172024326672459,
+ "grad_norm": 5.498688220977783,
+ "learning_rate": 2.9123173277661797e-05,
+ "loss": 0.1294,
+ "step": 7500
+ },
+ {
+ "epoch": 2.3168259484506226,
+ "grad_norm": 10.377650260925293,
+ "learning_rate": 2.7631971368923353e-05,
+ "loss": 0.1177,
+ "step": 8000
+ },
+ {
+ "epoch": 2.4616275702287864,
+ "grad_norm": 0.05050520598888397,
+ "learning_rate": 2.614076946018491e-05,
+ "loss": 0.1517,
+ "step": 8500
+ },
+ {
+ "epoch": 2.6064291920069502,
+ "grad_norm": 0.008191225118935108,
+ "learning_rate": 2.4649567551446467e-05,
+ "loss": 0.107,
+ "step": 9000
+ },
+ {
+ "epoch": 2.7512308137851145,
+ "grad_norm": 5.880840301513672,
+ "learning_rate": 2.3158365642708024e-05,
+ "loss": 0.1282,
+ "step": 9500
+ },
+ {
+ "epoch": 2.8960324355632783,
+ "grad_norm": 0.02497878111898899,
+ "learning_rate": 2.166716373396958e-05,
+ "loss": 0.1484,
+ "step": 10000
+ },
+ {
+ "epoch": 3.040834057341442,
+ "grad_norm": 0.010482904501259327,
+ "learning_rate": 2.0175961825231137e-05,
+ "loss": 0.1116,
+ "step": 10500
+ },
+ {
+ "epoch": 3.1856356791196063,
+ "grad_norm": 0.02279495634138584,
+ "learning_rate": 1.8684759916492694e-05,
+ "loss": 0.0792,
+ "step": 11000
+ },
+ {
+ "epoch": 3.33043730089777,
+ "grad_norm": 0.010266830213367939,
+ "learning_rate": 1.719355800775425e-05,
+ "loss": 0.07,
+ "step": 11500
+ },
+ {
+ "epoch": 3.475238922675934,
+ "grad_norm": 4.286752223968506,
+ "learning_rate": 1.5702356099015807e-05,
+ "loss": 0.0891,
+ "step": 12000
+ },
+ {
+ "epoch": 3.6200405444540977,
+ "grad_norm": 0.03043985180556774,
+ "learning_rate": 1.4211154190277366e-05,
+ "loss": 0.0843,
+ "step": 12500
+ },
+ {
+ "epoch": 3.7648421662322615,
+ "grad_norm": 0.00584929995238781,
+ "learning_rate": 1.2719952281538922e-05,
+ "loss": 0.0819,
+ "step": 13000
+ },
+ {
+ "epoch": 3.909643788010426,
+ "grad_norm": 0.08148284256458282,
+ "learning_rate": 1.1228750372800479e-05,
+ "loss": 0.0773,
+ "step": 13500
+ },
+ {
+ "epoch": 4.05444540978859,
+ "grad_norm": 0.07685733586549759,
+ "learning_rate": 9.737548464062036e-06,
+ "loss": 0.0674,
+ "step": 14000
+ },
+ {
+ "epoch": 4.199247031566753,
+ "grad_norm": 0.0025194736663252115,
+ "learning_rate": 8.246346555323591e-06,
+ "loss": 0.042,
+ "step": 14500
+ },
+ {
+ "epoch": 4.344048653344918,
+ "grad_norm": 0.00301796430721879,
+ "learning_rate": 6.755144646585148e-06,
+ "loss": 0.0407,
+ "step": 15000
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 17265,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 5,
+ "save_steps": 5000,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 6.313936604371354e+16,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+ }
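
Note: trainer_state.json is plain JSON, so the log_history committed above can be inspected directly. A minimal sketch (the only assumption is that the file has been downloaded locally under the same name) that pulls the loss curve out of the state file:

import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry here carries epoch, step, loss, learning_rate and grad_norm.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]

print(f"global_step={state['global_step']} of max_steps={state['max_steps']}")
print(f"first logged loss={losses[0]} (step {steps[0]}), last={losses[-1]} (step {steps[-1]})")
# With the values committed above this prints 0.4824 at step 500 and 0.0407 at step 15000.
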
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f02fd966405337d64553c35ab1890418d19f21ca9a8980c839fdf67f4155bb2f
+ size 4731
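
Note: training_args.bin and the other binary checkpoint artifacts in this commit are written with torch.save, so they can be loaded back for inspection once the real LFS blobs have been fetched. A minimal sketch, assuming transformers is installed (the pickled object is a transformers.TrainingArguments) and a recent PyTorch:

import torch

# training_args.bin is a pickled TrainingArguments object, not a tensor state dict,
# so it needs weights_only=False when unpickling on newer PyTorch versions.
args = torch.load("training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.learning_rate, args.num_train_epochs)

# scheduler.pt holds the LR scheduler state saved at this checkpoint.
sched_state = torch.load("scheduler.pt", weights_only=False)
print(sorted(sched_state.keys()))
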