{
    "lora_name": "Humility",
    "always_override": true,
    "q_proj_en": true,
    "v_proj_en": true,
    "k_proj_en": false,
    "o_proj_en": false,
    "gate_proj_en": false,
    "down_proj_en": false,
    "up_proj_en": false,
    "save_steps": 0.0,
    "micro_batch_size": 4,
    "batch_size": 128,
    "epochs": 10.0,
    "learning_rate": "3e-4",
    "lr_scheduler_type": "linear",
    "lora_rank": 128,
    "lora_alpha": 256,
    "lora_dropout": 0.05,
    "cutoff_len": 256,
    "dataset": "norobots_alpaca",
    "eval_dataset": "None",
    "format": "alpaca-format",
    "eval_steps": 100.0,
    "raw_text_file": "gnosisreformatted",
    "overlap_len": 128,
    "newline_favor_len": 128,
    "higher_rank_limit": false,
    "warmup_steps": 100.0,
    "optimizer": "adamw_torch",
    "hard_cut_string": "\\n\\n\\n",
    "train_only_after": "",
    "stop_at_loss": 0,
    "add_eos_token": false,
    "min_chars": 0.0,
    "report_to": "None"
}
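
For reference, a minimal sketch of how these fields would map onto Hugging Face PEFT and Transformers objects, assuming the text-generation-webui conventions the key names suggest. The mapping and the output_dir are assumptions for illustration, not the tool's verbatim code; only the two projections flagged "*_en": true become LoRA target modules, and the effective batch of 128 is reached by accumulating 32 micro-batches of 4.

from peft import LoraConfig
from transformers import TrainingArguments

# Assumed translation of the JSON config above (comments name the JSON keys).
lora_config = LoraConfig(
    r=128,                                 # "lora_rank"
    lora_alpha=256,                        # "lora_alpha" (scaling = alpha / rank = 2.0)
    lora_dropout=0.05,                     # "lora_dropout"
    target_modules=["q_proj", "v_proj"],   # only projections with "*_en": true
    bias="none",
    task_type="CAUSAL_LM",
)

training_args = TrainingArguments(
    output_dir="loras/Humility",           # derived from "lora_name" (assumption)
    per_device_train_batch_size=4,         # "micro_batch_size"
    gradient_accumulation_steps=128 // 4,  # "batch_size" / "micro_batch_size" = 32
    num_train_epochs=10,                   # "epochs"
    learning_rate=3e-4,                    # "learning_rate"
    lr_scheduler_type="linear",            # "lr_scheduler_type"
    warmup_steps=100,                      # "warmup_steps"
    optim="adamw_torch",                   # "optimizer"
    eval_steps=100,                        # "eval_steps"
    report_to="none",                      # "report_to": "None" disables loggers
)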