|
{
  "_frozen": true,
  "_n_gpu": 2,
  "adafactor": false,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "adam_epsilon": 1e-06,
  "architectures": [
    "InversionModel"
  ],
  "auto_find_batch_size": false,
  "bf16": false,
  "bf16_full_eval": false,
  "cache_dir": null,
  "cheat_on_train_hypotheses": false,
  "config_name": null,
  "config_overrides": null,
  "corrector_ignore_hypothesis_embedding": false,
  "corrector_model_alias": null,
  "corrector_model_from_pretrained": null,
  "data_seed": null,
  "dataloader_drop_last": false,
  "dataloader_num_workers": 2,
  "dataloader_pin_memory": true,
  "dataset_name": "bias-bios",
  "ddp_backend": null,
  "ddp_broadcast_buffers": null,
  "ddp_bucket_cap_mb": null,
  "ddp_find_unused_parameters": false,
  "ddp_timeout": 1800,
  "debug": [],
  "decoder_dropout_disabled": false,
  "deepspeed": null,
  "deepspeed_plugin": null,
  "disable_tqdm": false,
  "dispatch_batches": null,
  "do_eval": false,
  "do_predict": false,
  "do_train": false,
  "embedder_fake_with_zeros": false,
  "embedder_model_api": null,
  "embedder_model_name": "gtr_base",
  "embedder_no_grad": true,
  "embedder_torch_dtype": "float32",
  "embedding_transform_strategy": "repeat",
  "embedding_zero_except_topk": null,
  "embeddings_from_layer_n": null,
  "encoder_dropout_disabled": false,
  "eval_accumulation_steps": null,
  "eval_delay": 0,
  "eval_steps": 31,
  "evaluation_strategy": "steps",
  "exp_group_name": "gtr-64-from-gtr-32-overfitting-06-02-24",
  "exp_name": "",
  "experiment": "inversion",
  "fp16": true,
  "fp16_backend": "auto",
  "fp16_full_eval": false,
  "fp16_opt_level": "O1",
  "freeze_strategy": "none",
  "fsdp": [],
  "fsdp_config": {
    "min_num_params": 0,
    "xla": false,
    "xla_fsdp_grad_ckpt": false
  },
  "fsdp_min_num_params": 0,
  "fsdp_transformer_layer_cls_to_wrap": null,
  "full_determinism": false,
  "gradient_accumulation_steps": 1,
  "gradient_checkpointing": false,
  "gradient_checkpointing_kwargs": null,
  "greater_is_better": false,
  "group_by_length": true,
  "half_precision_backend": "auto",
  "hub_always_push": false,
  "hub_model_id": null,
  "hub_private_repo": false,
  "hub_strategy": "every_save",
  "hub_token": null,
  "ignore_data_skip": false,
  "include_inputs_for_metrics": true,
  "include_tokens_per_second": false,
  "jit_mode_eval": false,
  "label_names": null,
  "label_smoothing_factor": 0.0,
  "learning_rate": 0.001,
  "length_column_name": "length",
  "load_best_model_at_end": true,
  "local_rank": 0,
  "log_level": "passive",
  "log_level_replica": "warning",
  "log_on_each_node": true,
  "logging_dir": "/home/nlp/matan_avitan/git/vec2text/saves/train_on_bios/output-checkpoin-259966/runs/Feb07_00-28-23_dsinlp01",
  "logging_first_step": false,
  "logging_nan_inf_filter": true,
  "logging_steps": 12,
  "logging_strategy": "steps",
  "lr_scheduler_type": "constant_with_warmup",
  "max_eval_samples": 500,
  "max_grad_norm": 1.0,
  "max_seq_length": 64,
  "max_steps": -1,
  "metric_for_best_model": "bias-bios_loss",
  "mock_embedder": false,
  "model_name_or_path": "MatanAvitan/gtr__nq__64_bios__correct",
  "model_revision": "main",
  "mp_parameters": "",
  "neftune_noise_alpha": null,
  "no_cuda": false,
  "num_repeat_tokens": 16,
  "num_train_epochs": 10.0,
  "optim": "adamw_torch",
  "optim_args": null,
  "output_dir": "/home/nlp/matan_avitan/git/vec2text/saves/train_on_bios/output-checkpoin-259966",
  "overwrite_output_dir": false,
  "past_index": -1,
  "per_device_eval_batch_size": 512,
  "per_device_train_batch_size": 512,
  "per_gpu_eval_batch_size": null,
  "per_gpu_train_batch_size": null,
  "prediction_loss_only": false,
  "push_to_hub": false,
  "push_to_hub_model_id": null,
  "push_to_hub_organization": null,
  "push_to_hub_token": null,
  "ray_scope": "last",
  "remove_unused_columns": false,
  "report_to": [
    "wandb"
  ],
  "resume_from_checkpoint": "/home/nlp/matan_avitan/git/vec2text/saves/train_on_bios/checkpoint-fake/",
  "run_name": "/home/nlp/matan_avitan/git/vec2text/saves/train_on_bios/output-checkpoin-259966",
  "save_on_each_node": false,
  "save_safetensors": true,
  "save_steps": 62,
  "save_strategy": "steps",
  "save_total_limit": 2,
  "seed": 42,
  "skip_memory_metrics": true,
  "split_batches": false,
  "steps_per_epoch": 500000,
  "tf32": null,
  "tokenizer_name": null,
  "torch_compile": false,
  "torch_compile_backend": null,
  "torch_compile_mode": null,
  "torch_dtype": "float32",
  "torchdynamo": null,
  "tpu_metrics_debug": false,
  "tpu_num_cores": null,
  "transformers_version": "4.35.0",
  "use_cpu": false,
  "use_frozen_embeddings_as_input": true,
  "use_ipex": false,
  "use_legacy_prediction_loop": false,
  "use_less_data": -1,
  "use_lora": false,
  "use_mps_device": false,
  "use_wandb": true,
  "warmup_ratio": 0.05,
  "warmup_steps": 125,
  "weight_decay": 0.0
}
|
|