ngocquangt2k46 committed
Commit 31a5406 · verified · 1 Parent(s): 02d02af

Training in progress, step 20, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -21,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "o_proj",
-    "down_proj",
     "k_proj",
     "v_proj",
     "q_proj",
-    "up_proj",
-    "gate_proj"
+    "gate_proj",
+    "down_proj",
+    "up_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c129f22828d9a11a70b1b6a912223b1c18badbd13ded6a14546603a09d84074b
+oid sha256:3ffdcf9f926d8be928a0034929041b9c7f54b4c50c234ff9ef11d235f1552cb0
 size 50624
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:08421e6f0b59e1bd7f90d8d5d4a01b9ed4e59046adf26cfad2d2fdd7067acd3f
+oid sha256:297b9ecd3a6c7081a6ffcb673aa55e7d02260ef54fa8d65a57beac6937393a81
 size 111142
last-checkpoint/trainer_state.json CHANGED
@@ -10,7 +10,7 @@
   "log_history": [
     {
       "epoch": 0.0003251239535072746,
-      "grad_norm": 0.021640103310346603,
+      "grad_norm": 0.024652473628520966,
       "learning_rate": 1e-05,
       "loss": 11.7645,
       "step": 1
@@ -18,174 +18,174 @@
     {
       "epoch": 0.0003251239535072746,
       "eval_loss": 11.76447582244873,
-      "eval_runtime": 249.3031,
-      "eval_samples_per_second": 41.56,
-      "eval_steps_per_second": 20.782,
+      "eval_runtime": 276.626,
+      "eval_samples_per_second": 37.455,
+      "eval_steps_per_second": 18.729,
       "step": 1
     },
     {
       "epoch": 0.0006502479070145493,
-      "grad_norm": 0.024706723168492317,
+      "grad_norm": 0.027672940865159035,
       "learning_rate": 2e-05,
       "loss": 11.7658,
       "step": 2
     },
     {
       "epoch": 0.000975371860521824,
-      "grad_norm": 0.022929754108190536,
+      "grad_norm": 0.025643622502684593,
       "learning_rate": 3e-05,
       "loss": 11.7649,
       "step": 3
     },
     {
       "epoch": 0.0013004958140290985,
-      "grad_norm": 0.022171195596456528,
+      "grad_norm": 0.025678519159555435,
       "learning_rate": 4e-05,
       "loss": 11.7648,
       "step": 4
     },
     {
       "epoch": 0.0016256197675363732,
-      "grad_norm": 0.02208411693572998,
+      "grad_norm": 0.02572811394929886,
       "learning_rate": 5e-05,
       "loss": 11.7649,
       "step": 5
     },
     {
       "epoch": 0.0016256197675363732,
-      "eval_loss": 11.764402389526367,
-      "eval_runtime": 249.1579,
-      "eval_samples_per_second": 41.584,
-      "eval_steps_per_second": 20.794,
+      "eval_loss": 11.764395713806152,
+      "eval_runtime": 276.5749,
+      "eval_samples_per_second": 37.462,
+      "eval_steps_per_second": 18.733,
       "step": 5
     },
     {
       "epoch": 0.001950743721043648,
-      "grad_norm": 0.02443164400756359,
+      "grad_norm": 0.026358768343925476,
       "learning_rate": 6e-05,
       "loss": 11.7634,
       "step": 6
     },
     {
       "epoch": 0.0022758676745509225,
-      "grad_norm": 0.023470092564821243,
+      "grad_norm": 0.026500564068555832,
       "learning_rate": 7e-05,
       "loss": 11.7654,
       "step": 7
     },
     {
       "epoch": 0.002600991628058197,
-      "grad_norm": 0.023716744035482407,
+      "grad_norm": 0.02642131596803665,
       "learning_rate": 8e-05,
       "loss": 11.7644,
       "step": 8
     },
     {
       "epoch": 0.002926115581565472,
-      "grad_norm": 0.022970277816057205,
+      "grad_norm": 0.02585265040397644,
       "learning_rate": 9e-05,
       "loss": 11.7658,
       "step": 9
     },
     {
       "epoch": 0.0032512395350727465,
-      "grad_norm": 0.022244542837142944,
+      "grad_norm": 0.024854907765984535,
       "learning_rate": 0.0001,
       "loss": 11.7641,
       "step": 10
     },
     {
       "epoch": 0.0032512395350727465,
-      "eval_loss": 11.764139175415039,
-      "eval_runtime": 248.9949,
-      "eval_samples_per_second": 41.611,
-      "eval_steps_per_second": 20.808,
+      "eval_loss": 11.76411247253418,
+      "eval_runtime": 276.5048,
+      "eval_samples_per_second": 37.471,
+      "eval_steps_per_second": 18.737,
       "step": 10
     },
     {
       "epoch": 0.003576363488580021,
-      "grad_norm": 0.024796368554234505,
+      "grad_norm": 0.02757527120411396,
       "learning_rate": 9.755282581475769e-05,
-      "loss": 11.7653,
+      "loss": 11.7652,
       "step": 11
     },
     {
       "epoch": 0.003901487442087296,
-      "grad_norm": 0.024691808968782425,
+      "grad_norm": 0.028088003396987915,
       "learning_rate": 9.045084971874738e-05,
-      "loss": 11.7642,
+      "loss": 11.7641,
       "step": 12
     },
     {
       "epoch": 0.00422661139559457,
-      "grad_norm": 0.026635609567165375,
+      "grad_norm": 0.02876427210867405,
       "learning_rate": 7.938926261462366e-05,
-      "loss": 11.7643,
+      "loss": 11.7642,
       "step": 13
     },
     {
       "epoch": 0.004551735349101845,
-      "grad_norm": 0.024080852046608925,
+      "grad_norm": 0.027249282225966454,
       "learning_rate": 6.545084971874738e-05,
-      "loss": 11.7639,
+      "loss": 11.7638,
       "step": 14
     },
     {
       "epoch": 0.00487685930260912,
-      "grad_norm": 0.021976996213197708,
+      "grad_norm": 0.025040265172719955,
       "learning_rate": 5e-05,
       "loss": 11.763,
       "step": 15
     },
     {
       "epoch": 0.00487685930260912,
-      "eval_loss": 11.7637939453125,
-      "eval_runtime": 248.9817,
-      "eval_samples_per_second": 41.613,
-      "eval_steps_per_second": 20.809,
+      "eval_loss": 11.763741493225098,
+      "eval_runtime": 276.3837,
+      "eval_samples_per_second": 37.488,
+      "eval_steps_per_second": 18.746,
       "step": 15
     },
     {
       "epoch": 0.005201983256116394,
-      "grad_norm": 0.021976066753268242,
+      "grad_norm": 0.024846818298101425,
       "learning_rate": 3.4549150281252636e-05,
       "loss": 11.7639,
       "step": 16
     },
     {
       "epoch": 0.005527107209623669,
-      "grad_norm": 0.0250637736171484,
+      "grad_norm": 0.027951408177614212,
       "learning_rate": 2.061073738537635e-05,
-      "loss": 11.7633,
+      "loss": 11.7632,
       "step": 17
     },
     {
       "epoch": 0.005852231163130944,
-      "grad_norm": 0.024703800678253174,
+      "grad_norm": 0.02765224315226078,
       "learning_rate": 9.549150281252633e-06,
-      "loss": 11.7632,
+      "loss": 11.7631,
       "step": 18
     },
     {
       "epoch": 0.006177355116638218,
-      "grad_norm": 0.02371840551495552,
+      "grad_norm": 0.027219220995903015,
       "learning_rate": 2.4471741852423237e-06,
-      "loss": 11.7637,
+      "loss": 11.7636,
       "step": 19
     },
     {
       "epoch": 0.006502479070145493,
-      "grad_norm": 0.027609504759311676,
+      "grad_norm": 0.031280361115932465,
       "learning_rate": 0.0,
-      "loss": 11.764,
+      "loss": 11.7639,
       "step": 20
     },
     {
       "epoch": 0.006502479070145493,
-      "eval_loss": 11.763700485229492,
-      "eval_runtime": 248.9974,
-      "eval_samples_per_second": 41.611,
-      "eval_steps_per_second": 20.807,
+      "eval_loss": 11.763640403747559,
+      "eval_runtime": 276.2913,
+      "eval_samples_per_second": 37.5,
+      "eval_steps_per_second": 18.752,
       "step": 20
     }
   ],
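
A minimal sketch, assuming only the Python standard library and the checkpoint layout shown in this commit, of how the log_history entries updated in this trainer_state.json can be read back, for example to compare the training loss and eval_loss across the 20 logged steps.

```python
import json

# Path assumes the repository layout shown in this commit.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

# Each log_history record is either a training step (has "loss") or an
# evaluation record (has "eval_loss"), as seen in the diff above.
for record in state["log_history"]:
    if "loss" in record:
        print(f"step {record['step']:>2}: train loss {record['loss']}")
    elif "eval_loss" in record:
        print(f"step {record['step']:>2}: eval loss  {record['eval_loss']}")
```
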
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9a413dd30c82bf1458b4c9aa8cd9fc6df506b56de5b15f10d2e2a254ff755ea0
+oid sha256:e17a6c83dec956fb9efedd2c17093957afde82b766a4186ec8d108b6cbea110d
 size 6776