# llava-v1.6-vicuna-7b-4bit-lora-sft / llamaboard_config.yaml
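# Exported by LLaMA Board (the LLaMA-Factory web UI): 'top.*' keys record the
# model panel, 'train.*' keys the Train tab.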
top.booster: unsloth
top.checkpoint_path: []
top.finetuning_type: lora
top.model_name: LLaVA1.5-7B-Chat
top.quantization_bit: none
top.rope_scaling: none
top.template: vicuna
top.visual_inputs: true
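# Train tab: one-epoch LoRA supervised fine-tuning on llava_med_vi_1k.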
train.additional_target: ''
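# BAdam (block-wise Adam) options; inactive here since train.use_badam is false.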
train.badam_mode: layer
train.badam_switch_interval: 50
train.badam_switch_mode: ascending
train.badam_update_ratio: 0.05
train.batch_size: 1
train.compute_type: bf16
train.create_new_adapter: false
train.cutoff_len: 512
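# Dataset names below must be registered in <dataset_dir>/dataset_info.json
# (LLaMA-Factory convention); training is further capped by max_samples below.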
train.dataset:
- llava_med_vi_1k
train.dataset_dir: data
train.ds_offload: false
train.ds_stage: none
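# Freeze-tuning options; inactive since top.finetuning_type is lora.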
train.freeze_extra_modules: ''
train.freeze_trainable_layers: 2
train.freeze_trainable_modules: all
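# GaLore (gradient low-rank projection) options; inactive since
# train.use_galore is false.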
train.galore_rank: 16
train.galore_scale: 0.25
train.galore_target: all
train.galore_update_interval: 200
train.gradient_accumulation_steps: 8
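# Effective batch size: train.batch_size (1) x gradient_accumulation_steps (8)
# = 8 samples per optimizer step per device.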
train.learning_rate: 5e-5
train.logging_steps: 5
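# LoRA hyperparameters: with use_rslora false, the adapter scale is
# lora_alpha / lora_rank = 16 / 2 = 8. An empty lora_target falls back to the
# framework default (all linear layers in recent LLaMA-Factory releases; this
# default is an assumption, verify against the installed version).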
train.lora_alpha: 16
train.lora_dropout: 0
train.lora_rank: 2
train.lora_target: ''
train.loraplus_lr_ratio: 0
train.lr_scheduler_type: cosine
train.max_grad_norm: '1.0'
train.max_samples: '100'
train.neftune_alpha: 0
train.num_train_epochs: '1.0'
train.optim: adamw_torch
train.packing: false
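# PPO and preference-optimization fields (ppo_*, pref_*, reward_model) apply
# to RLHF/DPO stages and are unused in Supervised Fine-Tuning.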
train.ppo_score_norm: false
train.ppo_whiten_rewards: false
train.pref_beta: 0.1
train.pref_ftx: 0
train.pref_loss: sigmoid
train.report_to: false
train.resize_vocab: false
train.reward_model: null
train.save_steps: 100
train.shift_attn: false
train.training_stage: Supervised Fine-Tuning
train.upcast_layernorm: false
train.use_badam: false
train.use_dora: false
train.use_galore: false
train.use_llama_pro: false
train.use_pissa: false
train.use_rslora: false
train.val_size: 0
train.warmup_steps: 0
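# Usage sketch (assumption, not recorded in this file): LLaMA Board writes this
# snapshot alongside the trainer output; to rerun, either re-select these values
# in the web UI (launched with `llamafactory-cli webui`) or pass the companion
# training_args.yaml, if one was saved, to `llamafactory-cli train`.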