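# Training run configuration for Luminia-8B-RP. The flat 'top.*' (model panel)
# and 'train.*' (training panel) keys below match the layout that LLaMA Board,
# the LLaMA-Factory WebUI, saves for a run (an assumption based on the key names).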
top.booster: flashattn2
top.checkpoint_path:
- Luminia-8B-RP
top.finetuning_type: lora
top.model_name: LLaMA3.1-8B-Chat
top.quantization_bit: '4'
top.quantization_method: bitsandbytes
top.rope_scaling: linear
top.template: alpaca
top.visual_inputs: false
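# Model setup above: LoRA fine-tuning on top of 4-bit bitsandbytes quantization
# (i.e. a QLoRA-style run), FlashAttention-2 ('flashattn2') as the attention
# booster, linear RoPE scaling, and the alpaca chat template.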
train.additional_target: ''
train.badam_mode: layer
train.badam_switch_interval: 50
train.badam_switch_mode: ascending
train.badam_update_ratio: 0.05
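# The badam_* keys configure layer-wise BAdam optimization, but they are inert
# here because train.use_badam is false further down.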
train.batch_size: 1
train.compute_type: bf16
train.create_new_adapter: false
train.cutoff_len: 4000
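# Micro-batch of 1 in bf16; with gradient_accumulation_steps 1 (below), the
# effective batch size is also 1. Sequences are cut off at 4000 tokens.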
train.dataset:
- qa-unc-dpo
train.dataset_dir: data
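# Preference dataset 'qa-unc-dpo', resolved from the local data/ directory.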
train.ds_offload: false
train.ds_stage: none
train.freeze_extra_modules: ''
train.freeze_trainable_layers: 2
train.freeze_trainable_modules: all
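# DeepSpeed is disabled (ds_stage none). The freeze_* keys only apply to
# freeze (partial-parameter) tuning, so they have no effect under LoRA.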
train.galore_rank: 16
train.galore_scale: 0.25
train.galore_target: all
train.galore_update_interval: 200
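# The galore_* keys are likewise inactive, since train.use_galore is false below.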
train.gradient_accumulation_steps: 1
train.learning_rate: 5e-5
train.logging_steps: 10
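# Peak learning rate 5e-5, decayed by the cosine scheduler set below.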
train.lora_alpha: 64
train.lora_dropout: 0.35
train.lora_rank: 32
train.lora_target: all
train.loraplus_lr_ratio: 0
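# LoRA shape: rank 32 with alpha 64 (2x alpha/rank scaling) applied to all
# linear modules; dropout 0.35 is strong regularization by LoRA standards.
# loraplus_lr_ratio 0 leaves the LoRA+ learning-rate split disabled.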
train.lr_scheduler_type: cosine
train.mask_history: false
train.max_grad_norm: '1.0'
train.max_samples: '100000'
train.neat_packing: false
train.neftune_alpha: 5
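# neftune_alpha 5 enables NEFTune noisy-embedding regularization; neat_packing
# (packing without cross-sample contamination) stays off while plain packing
# is enabled below.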
train.num_train_epochs: '1.0'
train.optim: adamw_8bit
train.packing: true
train.ppo_score_norm: false
train.ppo_whiten_rewards: false
train.pref_beta: 0.1
train.pref_ftx: 0
train.pref_loss: orpo
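# Preference-optimization settings: pref_loss 'orpo' selects the ORPO objective
# for the DPO training stage (train.training_stage below), with pref_beta 0.1
# weighting the preference term and no SFT loss mixed in (pref_ftx 0).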
train.report_to: false
train.resize_vocab: false
train.reward_model:
- Luminia-8B-RP
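# The reward model listed here is only consulted by PPO-style stages; it should
# be unused in a DPO/ORPO run (kept here as saved by the UI).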
train.save_steps: 1000
train.shift_attn: false
train.train_on_prompt: false
train.training_stage: DPO
train.use_badam: false
train.use_dora: false
train.use_galore: false
train.use_llama_pro: false
train.use_pissa: false
train.use_rslora: false
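# Every optional optimizer/adapter variant (BAdam, DoRA, GaLore, LLaMA Pro,
# PiSSA, rsLoRA) is switched off, leaving the plain LoRA setup configured above.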
train.val_size: 0
train.warmup_steps: 0
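# No validation split (val_size 0) and no warmup, so training starts at the full
# learning rate. Note this WebUI dump is not itself a CLI argument file; rerunning
# it would mean stripping the top./train. prefixes before something like
# `llamafactory-cli train <args.yaml>` (a sketch, not a verified recipe).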