nlparabic committed
Commit
fc18018
1 Parent(s): 538a5f3

Training in progress, step 2000

Files changed (3)
  1. egy_training_log.txt +280 -0
  2. model.safetensors +1 -1
  3. training_args.bin +1 -1
egy_training_log.txt CHANGED
@@ -442,3 +442,283 @@ INFO:absl:Using default tokenizer.
 INFO:root:Epoch 5.0: Train Loss = 2.619, Eval Loss = 2.6229476928710938
 INFO:__main__:*** Evaluate ***
 INFO:absl:Using default tokenizer.
+WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
+INFO:__main__:Training/evaluation parameters TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+batch_eval_metrics=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=True,
+do_predict=False,
+do_train=True,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_do_concat_batches=True,
+eval_on_start=False,
+eval_steps=500,
+eval_strategy=IntervalStrategy.STEPS,
+eval_use_gather_object=False,
+evaluation_strategy=steps,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=1,
+gradient_checkpointing=False,
+gradient_checkpointing_kwargs=None,
+greater_is_better=False,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=HubStrategy.EVERY_SAVE,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=5e-05,
+length_column_name=length,
+load_best_model_at_end=True,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=/home/iais_marenpielka/Bouthaina/results/runs/Aug25_12-29-57_lmgpu-node-09,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=500,
+logging_strategy=IntervalStrategy.STEPS,
+lr_scheduler_kwargs={},
+lr_scheduler_type=SchedulerType.LINEAR,
+max_grad_norm=1.0,
+max_steps=-1,
+metric_for_best_model=loss,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=20.0,
+optim=OptimizerNames.ADAMW_TORCH,
+optim_args=None,
+optim_target_modules=None,
+output_dir=/home/iais_marenpielka/Bouthaina/results,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=8,
+prediction_loss_only=False,
+push_to_hub=True,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+remove_unused_columns=True,
+report_to=[],
+restore_callback_states_from_checkpoint=False,
+resume_from_checkpoint=None,
+run_name=/home/iais_marenpielka/Bouthaina/results,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=IntervalStrategy.STEPS,
+save_total_limit=None,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torch_empty_cache_steps=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=500,
+weight_decay=0.0,
+)
+INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/results/checkpoint-1595. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
+WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
+INFO:__main__:Training/evaluation parameters TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+batch_eval_metrics=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=True,
+do_predict=False,
+do_train=True,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_do_concat_batches=True,
+eval_on_start=False,
+eval_steps=500,
+eval_strategy=IntervalStrategy.STEPS,
+eval_use_gather_object=False,
+evaluation_strategy=steps,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=1,
+gradient_checkpointing=False,
+gradient_checkpointing_kwargs=None,
+greater_is_better=False,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=HubStrategy.EVERY_SAVE,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=5e-05,
+length_column_name=length,
+load_best_model_at_end=True,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=/home/iais_marenpielka/Bouthaina/results/runs/Aug25_12-33-08_lmgpu-node-09,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=500,
+logging_strategy=IntervalStrategy.STEPS,
+lr_scheduler_kwargs={},
+lr_scheduler_type=SchedulerType.LINEAR,
+max_grad_norm=1.0,
+max_steps=-1,
+metric_for_best_model=loss,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=20.0,
+optim=OptimizerNames.ADAMW_TORCH,
+optim_args=None,
+optim_target_modules=None,
+output_dir=/home/iais_marenpielka/Bouthaina/results,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=8,
+prediction_loss_only=False,
+push_to_hub=True,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+remove_unused_columns=True,
+report_to=[],
+restore_callback_states_from_checkpoint=False,
+resume_from_checkpoint=None,
+run_name=/home/iais_marenpielka/Bouthaina/results,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=IntervalStrategy.STEPS,
+save_total_limit=None,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torch_empty_cache_steps=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=500,
+weight_decay=0.0,
+)
+INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/results/checkpoint-1595. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
+INFO:datasets.builder:Using custom data configuration default-c90064c89aa4f4b8
+INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
+INFO:datasets.builder:Generating dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-c90064c89aa4f4b8/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
+INFO:datasets.builder:Downloading and preparing dataset text/default to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-c90064c89aa4f4b8/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101...
+INFO:datasets.download.download_manager:Downloading took 0.0 min
+INFO:datasets.download.download_manager:Checksum Computation took 0.0 min
+INFO:datasets.builder:Generating train split
+INFO:datasets.builder:Generating validation split
+INFO:datasets.utils.info_utils:Unable to verify splits sizes.
+INFO:datasets.builder:Dataset text downloaded and prepared to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-c90064c89aa4f4b8/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101. Subsequent calls will reuse this data.
+INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-c90064c89aa4f4b8/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-c0ee57decace0f18.arrow
+INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-c90064c89aa4f4b8/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-de0da9023cfd9691.arrow
+WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=768 instead. You can change that default value by passing --block_size xxx.
+INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-c90064c89aa4f4b8/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-8a18d3af082678db.arrow
+INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-c90064c89aa4f4b8/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-840375b4a2e73fb1.arrow
+WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
+WARNING:root:Epoch 4.0: No losses recorded yet.
+INFO:absl:Using default tokenizer.
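
Note: the log above records two back-to-back launches of the same causal-LM fine-tuning run (the two TrainingArguments dumps differ only in `logging_dir` timestamps), each resuming from checkpoint-1595. The following is a minimal, hypothetical sketch of how a run with this logged configuration could be reproduced. The base model, tokenizer source, and data file names are assumptions (the log does not name them); the hyperparameters mirror a subset of the logged TrainingArguments.

```python
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint

output_dir = "/home/iais_marenpielka/Bouthaina/results"

# Assumption: model and tokenizer were previously saved to output_dir.
tokenizer = AutoTokenizer.from_pretrained(output_dir)
model = AutoModelForCausalLM.from_pretrained(output_dir)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # common idiom for GPT-style tokenizers

# The logged block_size warning corresponds to a guard like this: cap the
# context length when the tokenizer reports an effectively unbounded
# model_max_length.
block_size = min(768, tokenizer.model_max_length)

# The datasets messages come from the packaged `text` builder; the data file
# paths here are placeholders.
raw = load_dataset("text", data_files={"train": "train.txt", "validation": "valid.txt"})

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, max_length=block_size)

tokenized = raw.map(tokenize, batched=True, remove_columns=["text"])

args = TrainingArguments(
    output_dir=output_dir,
    do_train=True,
    do_eval=True,
    eval_strategy="steps",
    eval_steps=500,
    logging_steps=500,
    save_steps=500,
    warmup_steps=500,
    learning_rate=5e-5,
    num_train_epochs=20.0,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    load_best_model_at_end=True,
    metric_for_best_model="loss",
    seed=42,
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized["train"],
    eval_dataset=tokenized["validation"],
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)

# "Checkpoint detected, resuming training at .../checkpoint-1595" follows from
# this pattern: pick up the newest checkpoint-* directory in output_dir.
trainer.train(resume_from_checkpoint=get_last_checkpoint(args.output_dir))
```

Because `output_dir` still contains checkpoint-1595 and `overwrite_output_dir=False`, each relaunch resumes rather than restarting, which is exactly what both log blocks show.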
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6bc210fd300ba6db0ea195ede5aa1e5c7469b4c97ae2abd88f2fbed47317d68d
+oid sha256:1ffc5bcdb28a967dfd70a631c4a770d9c09c95c8e38d0c845dd9dcc947ef9a66
 size 539218560
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2031df67d3f7c6b87317a09678a7218f25c1c8016608a67706479562146840ba
+oid sha256:4938217fef18cb2bc5ba64dcf8f2cb4a40151cc226d2ee933baac6c912a5ba06
 size 5240
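
Both binary artifacts (model.safetensors, training_args.bin) are stored via Git LFS, so the diffs above change only the sha256 oid inside a three-line pointer file while the actual blobs live in LFS storage. A minimal sketch, assuming the standard Git LFS pointer layout shown above, of checking a downloaded blob against its pointer:

```python
import hashlib
import os

def parse_lfs_pointer(pointer_text: str) -> dict:
    """Parse the 'key value' lines of a Git LFS pointer into oid and size."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"), "size": int(fields["size"])}

def verify(path: str, pointer_text: str) -> bool:
    """Return True iff the local file matches the pointer's size and sha256."""
    meta = parse_lfs_pointer(pointer_text)
    if os.path.getsize(path) != meta["size"]:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == meta["oid"]

# Pointer contents taken verbatim from the model.safetensors diff above.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:1ffc5bcdb28a967dfd70a631c4a770d9c09c95c8e38d0c845dd9dcc947ef9a66
size 539218560"""
# verify("model.safetensors", pointer)  # True iff the local blob matches this commit
```

This mirrors what `git lfs` itself does on checkout; it is useful for confirming that a manually downloaded model.safetensors corresponds to this exact commit.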