kohya-training / abarb_v1.json
{
"LoRA_type": "Standard",
"adaptive_noise_scale": 0.00357,
"additional_parameters": "--log_prefix=xl-loha",
"block_alphas": "",
"block_dims": "",
"block_lr_zero_threshold": "",
"bucket_no_upscale": false,
"bucket_reso_steps": 64,
"cache_latents": true,
"cache_latents_to_disk": true,
"caption_dropout_every_n_epochs": 0.0,
"caption_dropout_rate": 0,
"caption_extension": ".txt2",
"clip_skip": "1",
"color_aug": false,
"conv_alpha": 4,
"conv_block_alphas": "",
"conv_block_dims": "",
"conv_dim": 4,
"decompose_both": false,
"dim_from_weights": false,
"down_lr_weight": "",
"enable_bucket": true,
"epoch": 10,
"factor": -1,
"flip_aug": false,
"full_bf16": false,
"full_fp16": false,
"gradient_accumulation_steps": 1,
"gradient_checkpointing": false,
"keep_tokens": 1,
"learning_rate": 2e-05,
"logging_dir": "/workspace/kohya_ss/dataset/kohya-training/log",
"lora_network_weights": "",
"lr_scheduler": "adafactor",
"lr_scheduler_args": "",
"lr_scheduler_num_cycles": "1",
"lr_scheduler_power": "",
"lr_warmup": 0,
"max_bucket_reso": 2048,
"max_data_loader_n_workers": "0",
"max_resolution": "1024,1024",
"max_timestep": 1000,
"max_token_length": "75",
"max_train_epochs": "",
"max_train_steps": "",
"mem_eff_attn": false,
"mid_lr_weight": "",
"min_bucket_reso": 64,
"min_snr_gamma": 0,
"min_timestep": 0,
"mixed_precision": "bf16",
"model_list": "custom",
"module_dropout": 0,
"multires_noise_discount": 0,
"multires_noise_iterations": 0,
"network_alpha": 8,
"network_dim": 32,
"network_dropout": 0,
"no_token_padding": false,
"noise_offset": 0.0357,
"noise_offset_type": "Original",
"num_cpu_threads_per_process": 16,
"optimizer": "Adafactor",
"optimizer_args": "",
"output_dir": "/workspace/kohya_ss/dataset/kohya-training/model",
"output_name": "abarb_v1",
"persistent_data_loader_workers": false,
"pretrained_model_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
"prior_loss_weight": 1.0,
"random_crop": false,
"rank_dropout": 0,
"reg_data_dir": "/workspace/kohya_ss/dataset/kohya-training/reg",
"resume": "",
"sample_every_n_epochs": 0,
"sample_every_n_steps": 250,
"sample_prompts": "a photo \\(medium\\) of abarb \\(subject\\), smile, highres, best quality, full sharp, 4 k photography --n lowres, low quality, blurry, out of focus, low effort, poor --w 1024 --h 1024 --d 420 --l 7.5 --s 30\n\na photo \\(medium\\) of abarb \\(subject\\), upper body, highres, best quality, full sharp, 4 k photography --n lowres, low quality, blurry, out of focus, low effort, poor --w 832 --h 1216 --d 421 --l 7.5 --s 30\n\na photo \\(medium\\) of abarb \\(subject\\), smile, highres, best quality, full sharp, 4 k photography --n lowres, low quality, blurry, out of focus, low effort, poor --w 1152 --h 896 --d 422 --l 7.5 --s 30\n\na photo \\(medium\\) of abarb \\(subject\\), swimsuit \\(wardrobe\\), highres, best quality, full sharp, 4 k photography --n lowres, low quality, blurry, out of focus, low effort, poor --w 896 --h 1152 --d 423 --l 7.5 --s 30",
"sample_sampler": "euler_a",
"save_every_n_epochs": 1,
"save_every_n_steps": 0,
"save_last_n_steps": 0,
"save_last_n_steps_state": 0,
"save_model_as": "safetensors",
"save_precision": "bf16",
"save_state": true,
"scale_v_pred_loss_like_noise_pred": false,
"scale_weight_norms": 1,
"sdxl": true,
"sdxl_cache_text_encoder_outputs": false,
"sdxl_no_half_vae": true,
"seed": "",
"shuffle_caption": false,
"stop_text_encoder_training": 0,
"text_encoder_lr": 2e-05,
"train_batch_size": 1,
"train_data_dir": "/workspace/kohya_ss/dataset/kohya-training/img",
"train_on_input": false,
"training_comment": "trigger: the white queen",
"unet_lr": 2e-05,
"unit": 1,
"up_lr_weight": "",
"use_cp": false,
"use_wandb": true,
"v2": false,
"v_parameterization": false,
"v_pred_like_loss": 0,
"vae_batch_size": 0,
"wandb_api_key": "0c90f14969d47b1b86b7fe4f2ecca630f3540eb6",
"weighted_captions": false,
"xformers": "xformers"
}
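
For reference, below is a minimal sketch (not part of the original file) of how a handful of these GUI fields map onto the sd-scripts command that kohya_ss ultimately launches. The flag names follow kohya-ss/sd-scripts' `sdxl_train_network.py` as I understand it; the filename `abarb_v1.json` is assumed from the title, and the mapping is illustrative rather than the GUI's actual export logic, so verify the flags against your installed version.

```python
# Illustrative sketch: build a partial sdxl_train_network.py command line
# from the GUI config above. Flag names follow kohya-ss/sd-scripts; check
# them against your installed version before use.
import json

with open("abarb_v1.json") as f:  # filename assumed from the gist title
    cfg = json.load(f)

args = [
    # Fields that pass through to the trainer essentially verbatim.
    f"--pretrained_model_name_or_path={cfg['pretrained_model_name_or_path']}",
    f"--train_data_dir={cfg['train_data_dir']}",
    f"--reg_data_dir={cfg['reg_data_dir']}",
    f"--output_dir={cfg['output_dir']}",
    f"--output_name={cfg['output_name']}",
    "--network_module=networks.lora",  # implied by LoRA_type "Standard"
    f"--network_dim={cfg['network_dim']}",
    f"--network_alpha={cfg['network_alpha']}",
    f"--learning_rate={cfg['learning_rate']}",
    f"--unet_lr={cfg['unet_lr']}",
    f"--text_encoder_lr={cfg['text_encoder_lr']}",
    f"--optimizer_type={cfg['optimizer']}",
    f"--lr_scheduler={cfg['lr_scheduler']}",
    f"--mixed_precision={cfg['mixed_precision']}",
    f"--save_precision={cfg['save_precision']}",
    f"--save_model_as={cfg['save_model_as']}",
    f"--max_train_epochs={cfg['epoch']}",
    f"--train_batch_size={cfg['train_batch_size']}",
]
print(" \\\n  ".join(["accelerate launch sdxl_train_network.py"] + args))
```

Boolean fields such as `cache_latents`, `enable_bucket`, and `sdxl_no_half_vae` correspond to bare store-true flags (`--cache_latents`, and so on) that are emitted only when the value is true, which is why the GUI stores them as JSON booleans rather than strings.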