ngocquangt2k46 committed (verified)
Commit 171d198 · 1 Parent(s): 316f2cc

Training in progress, step 15, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:602013ae87516812205f0ff9a7f72b136876fd4928114777f8049f7b9aec5d98
+oid sha256:37712096092d8d16805f1851c0cb5487c1de2a7e290293d6cb6687f076ec6366
 size 159967880
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f9feed44f4cb2419614ccf223f5ac83b625a148c67b92e9b20111e9f28c32b1
+oid sha256:76ca88a3291a3f98680748610a53429c8f18e90fc183970e1796d919e06c73aa
 size 81730196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a7eaefb7c8d3ff9aefbd8df87422c4e9c44a463b8ccfc3f0db11252f9faa01b
+oid sha256:6c56893ec02e2f131eda7c55d301561be28b6d224e001718380f1874157a5126
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4849a6ac0a1d895740f1ab4eba9d346b8d898008d0cfe93dd108cd928d7c63e
+oid sha256:ff2736979009751c0c6b0ddcc5f6544d6f723aa752b4798eab0b70fb76cf0083
 size 1064
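
The four binary files above are tracked with Git LFS, so the repository only stores a small pointer (spec version, sha256 OID, size); each checkpoint commit rewrites the OID while the sizes stay the same. A minimal sketch for checking locally downloaded copies against the OIDs on the "+" side of the pointer diffs, assuming the files have already been fetched (e.g. via `git lfs pull`) into `last-checkpoint/` (paths are illustrative):

```python
import hashlib
from pathlib import Path

# Expected OIDs, taken from the "+" side of the pointer diffs above.
EXPECTED = {
    "last-checkpoint/adapter_model.safetensors": "37712096092d8d16805f1851c0cb5487c1de2a7e290293d6cb6687f076ec6366",
    "last-checkpoint/optimizer.pt": "76ca88a3291a3f98680748610a53429c8f18e90fc183970e1796d919e06c73aa",
    "last-checkpoint/rng_state.pth": "6c56893ec02e2f131eda7c55d301561be28b6d224e001718380f1874157a5126",
    "last-checkpoint/scheduler.pt": "ff2736979009751c0c6b0ddcc5f6544d6f723aa752b4798eab0b70fb76cf0083",
}

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks so large checkpoints need not fit in memory."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

for name, oid in EXPECTED.items():
    status = "OK" if sha256_of(Path(name)) == oid else "MISMATCH"
    print(f"{status}  {name}")
```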
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.03955500618046971,
+  "epoch": 0.059332509270704575,
   "eval_steps": 5,
-  "global_step": 10,
+  "global_step": 15,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -101,6 +101,49 @@
       "eval_samples_per_second": 2.77,
       "eval_steps_per_second": 1.385,
       "step": 10
+    },
+    {
+      "epoch": 0.04351050679851669,
+      "grad_norm": 72.24517059326172,
+      "learning_rate": 9.755282581475769e-05,
+      "loss": 86.7828,
+      "step": 11
+    },
+    {
+      "epoch": 0.047466007416563656,
+      "grad_norm": 75.16802215576172,
+      "learning_rate": 9.045084971874738e-05,
+      "loss": 90.0533,
+      "step": 12
+    },
+    {
+      "epoch": 0.05142150803461063,
+      "grad_norm": 66.52326965332031,
+      "learning_rate": 7.938926261462366e-05,
+      "loss": 85.0806,
+      "step": 13
+    },
+    {
+      "epoch": 0.0553770086526576,
+      "grad_norm": 68.9215316772461,
+      "learning_rate": 6.545084971874738e-05,
+      "loss": 77.0213,
+      "step": 14
+    },
+    {
+      "epoch": 0.059332509270704575,
+      "grad_norm": 59.450138092041016,
+      "learning_rate": 5e-05,
+      "loss": 75.1714,
+      "step": 15
+    },
+    {
+      "epoch": 0.059332509270704575,
+      "eval_loss": 2.192617893218994,
+      "eval_runtime": 307.582,
+      "eval_samples_per_second": 2.77,
+      "eval_steps_per_second": 1.385,
+      "step": 15
     }
   ],
   "logging_steps": 1,
@@ -120,7 +163,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.0455331230449664e+17,
+  "total_flos": 1.5682996845674496e+17,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null