diff --git "a/rqrag/ERROR.txt" "b/rqrag/ERROR.txt" new file mode 100644--- /dev/null +++ "b/rqrag/ERROR.txt" @@ -0,0 +1,2146 @@ +The following values were not passed to `accelerate launch` and had defaults used instead: + More than one GPU was found, enabling multi-GPU training. + If this was unintended please pass in `--num_processes=1`. + `--dynamo_backend` was set to a value of `'no'` +To avoid this warning pass in values for each of the problematic parameters or run `accelerate config`. +[2024-12-08 10:32:09,587] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect) +W1208 10:32:15.406000 140176667264832 torch/distributed/run.py:779] +W1208 10:32:15.406000 140176667264832 torch/distributed/run.py:779] ***************************************** +W1208 10:32:15.406000 140176667264832 torch/distributed/run.py:779] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +W1208 10:32:15.406000 140176667264832 torch/distributed/run.py:779] ***************************************** +[2024-12-08 10:32:30,858] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2024-12-08 10:32:30,959] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2024-12-08 10:32:30,997] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2024-12-08 10:32:31,012] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2024-12-08 10:32:31,042] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2024-12-08 10:32:31,109] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2024-12-08 10:32:31,109] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2024-12-08 10:32:31,123] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2024-12-08 10:32:33,005] [INFO] [comm.py:652:init_distributed] cdb=None +12/08/2024 10:32:33 - INFO - __main__ - Distributed environment: DEEPSPEED Backend: nccl +Num processes: 8 +Process index: 4 +Local process index: 4 +Device: cuda:4 + +Mixed precision type: bf16 +ds_config: {'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'zero_allow_untested_optimizer': True, 'bf16': {'enabled': True}, 'zero_optimization': {'stage': 2, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': True, 'reduce_scatter': True, 'reduce_bucket_size': 'auto', 'contiguous_gradients': True, 'round_robin_gradients': True}, 'steps_per_print': inf, 'fp16': {'enabled': False}} + +[2024-12-08 10:32:33,196] [INFO] [comm.py:652:init_distributed] cdb=None +[2024-12-08 10:32:33,196] [INFO] [comm.py:683:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2024-12-08 10:32:33,212] [INFO] [comm.py:652:init_distributed] cdb=None +[2024-12-08 10:32:33,237] [INFO] [comm.py:652:init_distributed] cdb=None +[2024-12-08 10:32:33,257] [INFO] [comm.py:652:init_distributed] cdb=None +[2024-12-08 10:32:33,261] [INFO] [comm.py:652:init_distributed] cdb=None +[2024-12-08 10:32:33,311] [INFO] [comm.py:652:init_distributed] cdb=None +[2024-12-08 10:32:33,350] [INFO] 
[comm.py:652:init_distributed] cdb=None +Detected kernel version 4.19.91, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher. +12/08/2024 10:32:33 - INFO - __main__ - Distributed environment: DEEPSPEED Backend: nccl +Num processes: 8 +Process index: 0 +Local process index: 0 +Device: cuda:0 + +Mixed precision type: bf16 +ds_config: {'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'zero_allow_untested_optimizer': True, 'bf16': {'enabled': True}, 'zero_optimization': {'stage': 2, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': True, 'reduce_scatter': True, 'reduce_bucket_size': 'auto', 'contiguous_gradients': True, 'round_robin_gradients': True}, 'steps_per_print': inf, 'fp16': {'enabled': False}} + +12/08/2024 10:32:33 - INFO - __main__ - Distributed environment: DEEPSPEED Backend: nccl +Num processes: 8 +Process index: 6 +Local process index: 6 +Device: cuda:6 + +Mixed precision type: bf16 +ds_config: {'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'zero_allow_untested_optimizer': True, 'bf16': {'enabled': True}, 'zero_optimization': {'stage': 2, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': True, 'reduce_scatter': True, 'reduce_bucket_size': 'auto', 'contiguous_gradients': True, 'round_robin_gradients': True}, 'steps_per_print': inf, 'fp16': {'enabled': False}} + +12/08/2024 10:32:33 - INFO - __main__ - Distributed environment: DEEPSPEED Backend: nccl +Num processes: 8 +Process index: 2 +Local process index: 2 +Device: cuda:2 + +Mixed precision type: bf16 +ds_config: {'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'zero_allow_untested_optimizer': True, 'bf16': {'enabled': True}, 'zero_optimization': {'stage': 2, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': True, 'reduce_scatter': True, 'reduce_bucket_size': 'auto', 'contiguous_gradients': True, 'round_robin_gradients': True}, 'steps_per_print': inf, 'fp16': {'enabled': False}} + +12/08/2024 10:32:33 - INFO - __main__ - Distributed environment: DEEPSPEED Backend: nccl +Num processes: 8 +Process index: 1 +Local process index: 1 +Device: cuda:1 + +Mixed precision type: bf16 +ds_config: {'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'zero_allow_untested_optimizer': True, 'bf16': {'enabled': True}, 'zero_optimization': {'stage': 2, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': True, 'reduce_scatter': True, 'reduce_bucket_size': 'auto', 'contiguous_gradients': True, 'round_robin_gradients': True}, 'steps_per_print': inf, 'fp16': {'enabled': False}} + +12/08/2024 10:32:33 - INFO - __main__ - Distributed environment: DEEPSPEED Backend: nccl +Num processes: 8 +Process index: 5 +Local process index: 5 +Device: cuda:5 + +Mixed precision type: bf16 +ds_config: {'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'zero_allow_untested_optimizer': True, 'bf16': {'enabled': True}, 'zero_optimization': {'stage': 2, 'allgather_partitions': True, 
'allgather_bucket_size': 200000000.0, 'overlap_comm': True, 'reduce_scatter': True, 'reduce_bucket_size': 'auto', 'contiguous_gradients': True, 'round_robin_gradients': True}, 'steps_per_print': inf, 'fp16': {'enabled': False}} + +12/08/2024 10:32:33 - INFO - __main__ - Distributed environment: DEEPSPEED Backend: nccl +Num processes: 8 +Process index: 3 +Local process index: 3 +Device: cuda:3 + +Mixed precision type: bf16 +ds_config: {'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'zero_allow_untested_optimizer': True, 'bf16': {'enabled': True}, 'zero_optimization': {'stage': 2, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': True, 'reduce_scatter': True, 'reduce_bucket_size': 'auto', 'contiguous_gradients': True, 'round_robin_gradients': True}, 'steps_per_print': inf, 'fp16': {'enabled': False}} + +12/08/2024 10:32:33 - INFO - __main__ - Distributed environment: DEEPSPEED Backend: nccl +Num processes: 8 +Process index: 7 +Local process index: 7 +Device: cuda:7 + +Mixed precision type: bf16 +ds_config: {'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'zero_allow_untested_optimizer': True, 'bf16': {'enabled': True}, 'zero_optimization': {'stage': 2, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': True, 'reduce_scatter': True, 'reduce_bucket_size': 'auto', 'contiguous_gradients': True, 'round_robin_gradients': True}, 'steps_per_print': inf, 'fp16': {'enabled': False}} + +loading configuration file /share/qhj/LLMs/Meta-Llama-3.1-8B-Instruct/config.json +Model config LlamaConfig { + "_name_or_path": "/share/qhj/LLMs/Meta-Llama-3.1-8B-Instruct", + "architectures": [ + "LlamaForCausalLM" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 128000, + "eos_token_id": [ + 128001, + 128008, + 128009 + ], + "head_dim": 128, + "hidden_act": "silu", + "hidden_size": 4096, + "initializer_range": 0.02, + "intermediate_size": 14336, + "max_position_embeddings": 131072, + "mlp_bias": false, + "model_type": "llama", + "num_attention_heads": 32, + "num_hidden_layers": 32, + "num_key_value_heads": 8, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_scaling": { + "factor": 8.0, + "high_freq_factor": 4.0, + "low_freq_factor": 1.0, + "original_max_position_embeddings": 8192, + "rope_type": "llama3" + }, + "rope_theta": 500000.0, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.46.2", + "use_cache": true, + "vocab_size": 128256 +} + +loading file tokenizer.json +loading file tokenizer.model +loading file added_tokens.json +loading file special_tokens_map.json +loading file tokenizer_config.json +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
+ Loading checkpoint shards: 0%| | 0/4 [00:00 +[2024-12-08 10:34:56,997] [INFO] [logging.py:128:log_dist] [Rank 0] Creating torch.bfloat16 ZeRO stage 2 optimizer +[2024-12-08 10:34:56,997] [INFO] [stage_1_and_2.py:149:__init__] Reduce bucket size 16777216 +[2024-12-08 10:34:56,998] [INFO] [stage_1_and_2.py:150:__init__] Allgather bucket size 200000000 +[2024-12-08 10:34:56,998] [INFO] [stage_1_and_2.py:151:__init__] CPU Offload: False +[2024-12-08 10:34:56,998] [INFO] [stage_1_and_2.py:152:__init__] Round robin gradient partitioning: True +[2024-12-08 10:35:11,953] [INFO] [utils.py:781:see_memory_usage] Before initializing optimizer states +[2024-12-08 10:35:11,953] [INFO] [utils.py:782:see_memory_usage] MA 18.7 GB Max_MA 20.57 GB CA 20.57 GB Max_CA 21 GB +[2024-12-08 10:35:11,954] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 19.46 GB, percent = 2.4% +[2024-12-08 10:35:12,106] [INFO] [utils.py:781:see_memory_usage] After initializing optimizer states +[2024-12-08 10:35:12,107] [INFO] [utils.py:782:see_memory_usage] MA 18.7 GB Max_MA 22.44 GB CA 24.31 GB Max_CA 24 GB +[2024-12-08 10:35:12,107] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 19.46 GB, percent = 2.4% +[2024-12-08 10:35:12,107] [INFO] [stage_1_and_2.py:544:__init__] optimizer state initialized +[2024-12-08 10:35:12,249] [INFO] [utils.py:781:see_memory_usage] After initializing ZeRO optimizer +[2024-12-08 10:35:12,250] [INFO] [utils.py:782:see_memory_usage] MA 18.7 GB Max_MA 18.7 GB CA 24.31 GB Max_CA 24 GB +[2024-12-08 10:35:12,250] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 19.46 GB, percent = 2.4% +[2024-12-08 10:35:12,251] [INFO] [logging.py:128:log_dist] [Rank 0] DeepSpeed Final Optimizer = DeepSpeedZeroOptimizer +[2024-12-08 10:35:12,251] [INFO] [logging.py:128:log_dist] [Rank 0] DeepSpeed using configured LR scheduler = None +[2024-12-08 10:35:12,251] [INFO] [logging.py:128:log_dist] [Rank 0] DeepSpeed LR Scheduler = None +[2024-12-08 10:35:12,252] [INFO] [logging.py:128:log_dist] [Rank 0] step=0, skipped=0, lr=[0.0], mom=[(0.9, 0.999)] +[2024-12-08 10:35:12,252] [INFO] [config.py:999:print] DeepSpeedEngine configuration: +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] activation_checkpointing_config { + "partition_activations": false, + "contiguous_memory_optimization": false, + "cpu_checkpointing": false, + "number_checkpoints": null, + "synchronize_checkpoint_boundary": false, + "profile": false +} +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True, 'use_gds': False} +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] amp_enabled .................. False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] amp_params ................... False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] autotuning_config ............ 
{ + "enabled": false, + "start_step": null, + "end_step": null, + "metric_path": null, + "arg_mappings": null, + "metric": "throughput", + "model_info": null, + "results_dir": "autotuning_results", + "exps_dir": "autotuning_exps", + "overwrite": true, + "fast": true, + "start_profile_step": 3, + "end_profile_step": 5, + "tuner_type": "gridsearch", + "tuner_early_stopping": 5, + "tuner_num_trials": 50, + "model_info_path": null, + "mp_size": 1, + "max_train_batch_size": null, + "min_train_batch_size": 1, + "max_train_micro_batch_size_per_gpu": 1.024000e+03, + "min_train_micro_batch_size_per_gpu": 1, + "num_tuning_micro_batch_sizes": 3 +} +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] bfloat16_enabled ............. True +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] bfloat16_immediate_grad_update False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] checkpoint_parallel_write_pipeline False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] checkpoint_tag_validation_enabled True +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] checkpoint_tag_validation_fail False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] comms_config ................. +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] communication_data_type ...... None +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}} +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] curriculum_enabled_legacy .... False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] curriculum_params_legacy ..... False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}} +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] data_efficiency_enabled ...... False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] dataloader_drop_last ......... False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] disable_allgather ............ False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] dump_state ................... False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] dynamic_loss_scale_args ...... 
None +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] eigenvalue_enabled ........... False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] eigenvalue_gas_boundary_resolution 1 +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] eigenvalue_layer_name ........ bert.encoder.layer +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] eigenvalue_layer_num ......... 0 +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] eigenvalue_max_iter .......... 100 +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] eigenvalue_stability ......... 1e-06 +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] eigenvalue_tol ............... 0.01 +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] eigenvalue_verbose ........... False +[2024-12-08 10:35:12,253] [INFO] [config.py:1003:print] elasticity_enabled ........... False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] flops_profiler_config ........ { + "enabled": false, + "recompute_fwd_factor": 0.0, + "profile_step": 1, + "module_depth": -1, + "top_modules": 1, + "detailed": true, + "output_file": null +} +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] fp16_auto_cast ............... None +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] fp16_enabled ................. False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] fp16_master_weights_and_gradients False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] global_rank .................. 0 +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] grad_accum_dtype ............. None +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] gradient_accumulation_steps .. 4 +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] gradient_clipping ............ 1.0 +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] gradient_predivide_factor .... 1.0 +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] graph_harvesting ............. False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8 +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] initial_dynamic_scale ........ 1 +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] load_universal_checkpoint .... False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] loss_scale ................... 1.0 +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] memory_breakdown ............. False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] mics_hierarchial_params_gather False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] mics_shard_size .............. -1 +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') comet=CometConfig(enabled=False, samples_log_interval=100, project=None, workspace=None, api_key=None, experiment_name=None, experiment_key=None, online=None, mode=None) wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] nebula_config ................ 
{ + "enabled": false, + "persistent_storage_path": null, + "persistent_time_interval": 100, + "num_of_version_in_retention": 2, + "enable_nebula_load": true, + "load_path": null +} +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] optimizer_legacy_fusion ...... False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] optimizer_name ............... None +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] optimizer_params ............. None +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0, 'pipe_partitioned': True, 'grad_partitioned': True} +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] pld_enabled .................. False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] pld_params ................... False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] prescale_gradients ........... False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] scheduler_name ............... None +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] scheduler_params ............. None +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] seq_parallel_communication_data_type torch.float32 +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] sparse_attention ............. None +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] sparse_gradients_enabled ..... False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] steps_per_print .............. inf +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] timers_config ................ enabled=True synchronized=True +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] train_batch_size ............. 32 +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] train_micro_batch_size_per_gpu 1 +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] use_data_before_expert_parallel_ False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] use_node_local_storage ....... False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] wall_clock_breakdown ......... False +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] weight_quantization_config ... None +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] world_size ................... 8 +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] zero_allow_untested_optimizer True +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] zero_config .................. 
stage=2 contiguous_gradients=True reduce_scatter=True reduce_bucket_size=16777216 use_multi_rank_bucket_allreduce=True allgather_partitions=True allgather_bucket_size=200000000 overlap_comm=True load_from_fp32_weights=True elastic_checkpoint=False offload_param=None offload_optimizer=None sub_group_size=1000000000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50000000 param_persistence_threshold=100000 model_persistence_threshold=9223372036854775807 max_live_parameters=1000000000 max_reuse_distance=1000000000 gather_16bit_weights_on_model_save=False use_all_reduce_for_fetch_params=False stage3_gather_fp16_weights_on_model_save=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=True zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] zero_enabled ................. True +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] zero_force_ds_cpu_optimizer .. True +[2024-12-08 10:35:12,254] [INFO] [config.py:1003:print] zero_optimization_stage ...... 2 +[2024-12-08 10:35:12,254] [INFO] [config.py:989:print_user_config] json = { + "train_batch_size": 32, + "train_micro_batch_size_per_gpu": 1, + "gradient_accumulation_steps": 4, + "gradient_clipping": 1.0, + "zero_allow_untested_optimizer": true, + "bf16": { + "enabled": true + }, + "zero_optimization": { + "stage": 2, + "allgather_partitions": true, + "allgather_bucket_size": 2.000000e+08, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 1.677722e+07, + "contiguous_gradients": true, + "round_robin_gradients": true + }, + "steps_per_print": inf, + "fp16": { + "enabled": false + } +} +12/08/2024 10:35:12 - INFO - __main__ - ***** Running training ***** +12/08/2024 10:35:12 - INFO - __main__ - Num examples = 40821 +12/08/2024 10:35:12 - INFO - __main__ - Num Epochs = 1 +12/08/2024 10:35:12 - INFO - __main__ - Instantaneous batch size per device = 1 +12/08/2024 10:35:12 - INFO - __main__ - Total train batch size (w. parallel, distributed & accumulation) = 32 +12/08/2024 10:35:12 - INFO - __main__ - Gradient Accumulation steps = 4 +12/08/2024 10:35:12 - INFO - __main__ - Total optimization steps = 1276 + 0%| | 0/1276 [00:00