wim-n3-phi3-mini-merged / training_args.json
Upload Phi-4-mini fine-tuned on n3 with 128K context
{
"model": "phi4-mini",
"use_unsloth_model": true,
"dataset": "UWV/wim-instruct-wiki-to-jsonld-agent-steps",
"filter_n3_only": true,
"max_samples": null,
"output_dir": "./phi4_lora_r320_128k",
"max_steps": 1000,
"batch_size": 1,
"gradient_accumulation_steps": 8,
"learning_rate": 1e-05,
"warmup_steps": 5,
"max_seq_length": 131072,
"rope_scaling": null,
"lora_r": 320,
"lora_alpha": 320,
"lora_dropout": 0,
"target_modules": [
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj"
],
"load_in_4bit": false,
"load_in_8bit": false,
"use_gradient_checkpointing": true,
"assistant_only_loss": true,
"num_workers": null,
"no_eval": false,
"full_finetune": false,
"wandb": false,
"seed": 42,
"push_to_hub": false,
"hub_model_id": null
}
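
For context, here is a minimal sketch of how a training script might consume these arguments with Unsloth. Only the hyperparameter values come from the JSON above; the checkpoint name "unsloth/Phi-4-mini-instruct", the file path, and the loader wiring are assumptions, not the repository's actual script.

# Sketch: apply training_args.json via Unsloth's LoRA setup.
# Assumptions: unsloth is installed, and "unsloth/Phi-4-mini-instruct"
# is the intended base checkpoint for "phi4-mini" (not confirmed here).
import json

from unsloth import FastLanguageModel

with open("training_args.json") as f:
    args = json.load(f)

# "use_unsloth_model": true -> load through Unsloth's fast loader.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/Phi-4-mini-instruct",  # assumed checkpoint
    max_seq_length=args["max_seq_length"],     # 131072 = 128K context
    load_in_4bit=args["load_in_4bit"],         # false: full-precision weights
)

# Attach LoRA adapters to the attention and MLP projections listed above.
model = FastLanguageModel.get_peft_model(
    model,
    r=args["lora_r"],                # 320, an unusually large rank
    lora_alpha=args["lora_alpha"],   # alpha == r -> LoRA scaling of 1.0
    lora_dropout=args["lora_dropout"],
    target_modules=args["target_modules"],
    use_gradient_checkpointing=args["use_gradient_checkpointing"],
    random_state=args["seed"],
)

Note that with lora_alpha equal to lora_r, the effective LoRA scaling factor (alpha / r) is 1.0, and a rank of 320 across all seven projection modules gives the adapter far more capacity than typical r=16..64 setups, at a correspondingly higher memory cost.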