base_model: Qwen/Qwen1.5-32B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
trust_remote_code: true

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: u-acc/mimi
    type: sharegpt
    conversation: chatml
  - path: u-acc/sonnetorcasubset
    type: sharegpt
    conversation: chatml
  - path: u-acc/claude_writing
    type: sharegpt
    conversation: chatml
  - path: kalomaze/Opus_Instruct_3k
    type: sharegpt
    conversation: chatml
  - path: kalomaze/Opus_Instruct_25k
    type: sharegpt
    conversation: chatml

chat_template: chatml
dataset_prepared_path:
val_set_size: 0.0
output_dir: ./magnum-32b-v1

default_system_message: You are an assistant that responds to the user.

sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: false

adapter:
lora_model_dir:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear:
lora_fan_in_fan_out:

wandb_project: magnum-32b-v2
wandb_entity:
wandb_watch:
wandb_name: attempt-2
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 1
num_epochs: 2
optimizer: paged_adamw_8bit
lr_scheduler: cosine
learning_rate: 0.00001

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: unsloth
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 30
evals_per_epoch:
eval_table_size:
eval_max_new_tokens:
saves_per_epoch: 2
debug:
deepspeed: deepspeed_configs/zero3_bf16.json
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
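
# Usage sketch (comments only, not part of the original config; the filename
# below is an assumption). With axolotl installed, a multi-GPU run using the
# DeepSpeed ZeRO-3 config referenced above is typically launched with:
#
#   accelerate launch -m axolotl.cli.train magnum-32b-v2.yaml
#
# Each dataset entry uses the sharegpt loader, which expects records shaped like:
#
#   {"conversations": [{"from": "human", "value": "..."},
#                      {"from": "gpt",   "value": "..."}]}
#
# and `chat_template: chatml` renders each turn in the ChatML format:
#
#   <|im_start|>user
#   ...<|im_end|>
#   <|im_start|>assistant
#   ...<|im_end|>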