nlparabic committed
Commit 98ff9be
1 Parent(s): 40507e2

Training in progress, epoch 10

egy_training_log.txt CHANGED
@@ -164,3 +164,148 @@ INFO:root:Epoch 9.0: Train Loss = 0.1285, Eval Loss = 0.4447513520717621
  INFO:absl:Using default tokenizer.
  INFO:__main__:*** Evaluate ***
  INFO:absl:Using default tokenizer.
+ WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
+ INFO:__main__:Training/evaluation parameters TrainingArguments(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ batch_eval_metrics=False,
+ bf16=False,
+ bf16_full_eval=False,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ dispatch_batches=None,
+ do_eval=True,
+ do_predict=False,
+ do_train=True,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.EPOCH,
+ eval_use_gather_object=False,
+ evaluation_strategy=epoch,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=False,
+ gradient_checkpointing_kwargs=None,
+ greater_is_better=False,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=None,
+ hub_private_repo=False,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=5e-05,
+ length_column_name=length,
+ load_best_model_at_end=True,
+ local_rank=0,
+ log_level=passive,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_gulf/runs/Sep01_08-33-27_lmgpu-node-09,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=500,
+ logging_strategy=IntervalStrategy.EPOCH,
+ lr_scheduler_kwargs={},
+ lr_scheduler_type=SchedulerType.LINEAR,
+ max_grad_norm=1.0,
+ max_steps=-1,
+ metric_for_best_model=loss,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_train_epochs=20.0,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=/home/iais_marenpielka/Bouthaina/res_nw_gulf,
+ overwrite_output_dir=False,
+ past_index=-1,
+ per_device_eval_batch_size=8,
+ per_device_train_batch_size=8,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=[],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=/home/iais_marenpielka/Bouthaina/res_nw_gulf,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=500,
+ save_strategy=IntervalStrategy.EPOCH,
+ save_total_limit=None,
+ seed=42,
+ skip_memory_metrics=True,
+ split_batches=None,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_mps_device=False,
+ warmup_ratio=0.0,
+ warmup_steps=500,
+ weight_decay=0.0,
+ )
+ INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/res_nw_gulf/checkpoint-7542. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
+ INFO:datasets.builder:Using custom data configuration default-2b7e5225e15b58f2
+ INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
+ INFO:datasets.builder:Overwrite dataset info from restored data version if exists.
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-2b7e5225e15b58f2/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-2b7e5225e15b58f2/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-2b7e5225e15b58f2/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-2b7e5225e15b58f2/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-0e6f713a2cf90561.arrow
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-2b7e5225e15b58f2/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-7a1aa08ee5aedefb.arrow
+ WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=768 instead. You can change that default value by passing --block_size xxx.
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-2b7e5225e15b58f2/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-543361b7d3d473c7.arrow
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-2b7e5225e15b58f2/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-17e90e37607440cd.arrow
+ WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
+ INFO:root:Epoch 10.0: Train Loss = 0.1152, Eval Loss = 0.45251065492630005
+ INFO:absl:Using default tokenizer.
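The dump above is the stock repr of `transformers.TrainingArguments`. As a reference point, its non-default values could be reproduced with a minimal sketch like the one below, assuming the standard Trainer API; `model`, `train_ds`, and `eval_ds` are hypothetical placeholders, not this repo's actual training script.

# Minimal sketch, assuming the standard transformers Trainer API.
# Only non-default values from the logged dump are set; `model`,
# `train_ds`, and `eval_ds` are hypothetical placeholders.
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="/home/iais_marenpielka/Bouthaina/res_nw_gulf",
    do_train=True,
    do_eval=True,
    eval_strategy="epoch",           # evaluate/log/save once per epoch
    logging_strategy="epoch",
    save_strategy="epoch",
    learning_rate=5e-05,
    num_train_epochs=20.0,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    warmup_steps=500,
    load_best_model_at_end=True,
    metric_for_best_model="loss",    # greater_is_better=False in the dump
    push_to_hub=True,
    seed=42,
)

# trainer = Trainer(model=model, args=args,
#                   train_dataset=train_ds, eval_dataset=eval_ds)
# Because output_dir already contains checkpoint-7542, the script resumes
# from it (the "Checkpoint detected" line above) unless
# --overwrite_output_dir is passed:
# trainer.train(resume_from_checkpoint=True)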
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fd9152205ce7dd7a48fabc774ab935bf733b9665f001b210dac23ba9b01cb8f4
+ oid sha256:80990c9feba6378f58de72419e79571dfb5ca38682cf623e6953cb4917053597
  size 539221632
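The weights update only touches this Git LFS pointer: the `oid sha256:` line changes while the size stays 539221632 bytes, and the payload itself lives in LFS storage. A downloaded copy can be checked against the new pointer by hashing it; a sketch, with the local path as a placeholder:

# Sketch: verify a local file against a Git LFS pointer's sha256 oid
# and size. The path is a placeholder for wherever the weights landed.
import hashlib
import os

expected_oid = "80990c9feba6378f58de72419e79571dfb5ca38682cf623e6953cb4917053597"
expected_size = 539221632
path = "model.safetensors"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("pointer and payload agree")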
special_tokens_map.json CHANGED
@@ -1,7 +1,7 @@
  {
  "additional_special_tokens": [
  {
- "content": "[s]",
+ "content": "<s>",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
tokenizer.json CHANGED
@@ -46,7 +46,7 @@
  },
  {
  "id": 64002,
- "content": "[s]",
+ "content": "<s>",
  "single_word": false,
  "lstrip": false,
  "rstrip": false,
tokenizer_config.json CHANGED
@@ -25,7 +25,7 @@
  "special": true
  },
  "64002": {
- "content": "[s]",
+ "content": "<s>",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -50,7 +50,7 @@
  }
  },
  "additional_special_tokens": [
- "[s]",
+ "<s>",
  "</s>",
  "[sep]"
  ],
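Because the same rename appears in special_tokens_map.json, tokenizer.json (id 64002), and tokenizer_config.json, the three files must stay in sync or loading will quietly fall back to treating `<s>` as plain text. A quick post-fix consistency check; the repo id is again a hypothetical placeholder:

# Sketch: confirm the three tokenizer files agree after the rename.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("nlparabic/res_nw_gulf")  # placeholder

assert "<s>" in tok.additional_special_tokens        # special_tokens_map.json
assert tok.convert_tokens_to_ids("<s>") == 64002     # tokenizer.json slot
assert tok.convert_ids_to_tokens(64002) == "<s>"     # tokenizer_config.json
print("tokenizer files are consistent")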
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9180ed0af43b2dfd01b6f5728a25d01a4ef553e3f335f669e25a43c50ec0408f
+ oid sha256:13a23bd84ac51d4dab23e5e385caaefed9744111cb6c9e85bb82ba98c6773b5c
  size 5240
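training_args.bin is the TrainingArguments object that Trainer serializes with torch.save, which is why its pointer changes whenever the arguments logged above do. It can be inspected directly; a sketch, with the local path as a placeholder:

# Sketch: inspect the serialized arguments. weights_only=False is needed
# on recent PyTorch because the file is a pickled TrainingArguments
# object, not a tensor checkpoint.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.warmup_steps)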