[ARB]
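# Aspect-ratio bucketing: images are grouped into resolution buckets rather
# than cropped to one fixed size. Note that sd-scripts normally expects the
# min/max bucket resolutions to be multiples of bucket_reso_steps; 327 and
# 1584 are not multiples of 64, which newer versions may reject.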
bucket_no_upscale = false
bucket_reso_steps = 64
enable_bucket = true
max_bucket_reso = 1584
min_bucket_reso = 327

[Attention]
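# Memory-efficient attention via the xformers library, preferred over the
# built-in mem_eff_attn implementation; only one of the two should be enabled.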
mem_eff_attn = false
xformers = true

[Basics]
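# Core training inputs; the "******" values are redacted paths.
# max_train_steps is 2^30, an effectively-infinite sentinel, so the 20-epoch
# limit is the real stopping condition. clip_skip = 2 (second-to-last CLIP
# layer) is the usual choice for anime-style SD1.x checkpoints.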
clip_skip = 2
max_train_epochs = 20
max_train_steps = 1073741824
pretrained_model_name_or_path = "******"
reg_data_dir = "******"
resolution = "720,720"
seed = 142760847
train_data_dir = "******"

[Cache_latents]
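# Pre-encode images to VAE latents once and persist them to disk; this
# requires color_aug and random_crop to stay disabled, as they are below.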
cache_latents = true
vae_batch_size = 1
cache_latents_to_disk = true

[Captions]
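# Tag-style caption handling: tags after the first keep_tokens token are
# shuffled each step, the whole caption is dropped 5% of the time, and the
# token limit is raised from the default 75 to 150. Token-length warmup is
# left off (token_warmup_step = 0).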
shuffle_caption = true
caption_extension = ".txt"
keep_tokens = 1
caption_dropout_rate = 0.05
caption_dropout_every_n_epochs = 0
caption_tag_dropout_rate = 0.0
max_token_length = 150
weighted_captions = false
token_warmup_min = 1
token_warmup_step = 0

[Data_augmentation]
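# All image-space augmentations are off; color_aug and random_crop would be
# incompatible with cached latents anyway.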
color_aug = false
flip_aug = false
random_crop = false

[Dataset]
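# DataLoader settings: 8 worker processes, kept alive across epochs; each
# image is used once per epoch (dataset_repeats = 1).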
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
dataset_repeats = 1

[Debugging]
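# Set debug_dataset = true to inspect images, buckets, and captions without
# actually training.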
debug_dataset = false

[Deprecated]
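# Superseded flags kept for compatibility: the optimizer is chosen via
# optimizer_type below, and this base learning_rate is overridden by the
# per-module unet_lr / text_encoder_lr values.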
use_8bit_adam = false
use_lion_optimizer = false
learning_rate = 0.0002

[Further_improvement]
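# Loss and noise tweaks: min_snr_gamma = 0 leaves Min-SNR loss weighting
# disabled (5 is a typical enabled value), while multiresolution (pyramid)
# noise is active with 6 iterations and a 0.3 discount per level.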
min_snr_gamma = 0
multires_noise_discount = 0.3
multires_noise_iterations = 6

[Huggingface]
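# Hugging Face Hub upload and resume are fully disabled.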
save_state_to_huggingface = false
resume_from_huggingface = false
async_upload = false

[Logging]
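# TensorBoard logs written under logging_dir, with run names prefixed "lora_".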
logging_dir = "******"
log_with = "tensorboard"
log_prefix = "lora_"

[Lr_scheduler]
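# Constant learning rate with no warmup. lr_scheduler_type would name a
# custom scheduler class and is unused here; num_cycles and power only apply
# to the cosine-with-restarts and polynomial schedulers respectively.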
lr_scheduler_type = ""
lr_scheduler = "constant"
lr_warmup_steps = 0
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1.0

[LyCORIS]
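# LyCORIS network wrapper: the "attn-mlp" preset places adapters on the
# attention and feed-forward (MLP) blocks, and algo=lora selects plain LoRA.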
network_module = "lycoris.kohya"
network_args = ["preset=attn-mlp", "algo=lora"]

[Network_setup]
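# Rank-4 network with alpha 2, i.e. an effective scale of alpha/dim = 0.5.
# Only the U-Net is trained; the text encoder stays frozen.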
dim_from_weights = false
network_alpha = 2
network_dim = 4
network_dropout = 0
network_train_text_encoder_only = false
network_train_unet_only = true
resume = false

[Optimizer]
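# 8-bit AdamW from bitsandbytes, betas (0.9, 0.99), weight decay 0.1.
# Because network_train_unet_only is set above, text_encoder_lr has no effect.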
gradient_accumulation_steps = 1
gradient_checkpointing = true
max_grad_norm = 1.0
optimizer_args = ["weight_decay=0.1", "betas=0.9,0.99"]
optimizer_type = "AdamW8bit"
text_encoder_lr = 0.0006
train_batch_size = 8
unet_lr = 0.0006

[Others]
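# Free-form comment embedded in the saved model's metadata.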
lowram = false
training_comment = "narugo1992's automated LoRA training, based on nebulae's config."

[Regularization]
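# Loss weight for the regularization images in reg_data_dir; 1.0 weights them
# the same as the training images.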
prior_loss_weight = 1.0

[SDv2]
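# The base model is SD1.x, so the v2 and v-prediction switches stay off.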
v2 = false
v_parameterization = false
scale_v_pred_loss_like_noise_pred = false

[Sampling_during_training]
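# Sampler for preview images; with no sample prompts or sampling interval
# configured, no previews are actually generated.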
sample_sampler = "ddim"

[Save]
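# One fp16 .safetensors checkpoint per epoch. save_every_n_steps is the same
# 2^30 sentinel as above, so step-based saving, and the save_last_n_steps
# pruning tied to it, is effectively inactive; save_last_n_steps_state is
# likewise inert while save_state is false.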
output_dir = "******"
output_name = "lisa_genshin"
save_every_n_epochs = 1
save_every_n_steps = 1073741824
save_last_n_steps = 200
save_last_n_steps_state = 1
save_model_as = "safetensors"
save_precision = "fp16"
save_state = false

[Training_precision]
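# fp16 mixed precision; full_fp16 / full_bf16 would cast the whole model and
# gradients as well, and stay off here.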
mixed_precision = "fp16"
full_fp16 = false
full_bf16 = false