# kohya-ss sd-scripts LoRA training configuration (narugo1992 automated pipeline).
# NOTE(review): removed non-TOML scrape residue that preceded this line
# (a "File size" banner, a commit short-hash, and a pasted line-number
# gutter) — any of those lines would make the file unparseable as TOML.
# Aspect-Ratio Bucketing: groups images by resolution bucket instead of
# cropping everything to one square size.
[ARB]
bucket_no_upscale = false
# Bucket resolutions advance in 64 px steps.
bucket_reso_steps = 64
enable_bucket = true
# NOTE(review): 1584 and 327 are not multiples of bucket_reso_steps (64) —
# presumably the trainer rounds them to the nearest step; verify it does
# not reject or silently clamp these bounds.
max_bucket_reso = 1584
min_bucket_reso = 327
# Attention backend selection — presumably mutually exclusive; only
# xformers is enabled here.
[Attention]
mem_eff_attn = false
xformers = true
# Core run parameters. Path-like values are redacted ("******") in this copy.
[Basics]
# Skip the last N CLIP layers; clip_skip = 2 is a common choice for
# anime-style SD1.x checkpoints — confirm against the base model used.
clip_skip = 2
max_train_epochs = 30
# 2^30 — an effectively-unreachable sentinel, so the 30-epoch limit above
# is what actually terminates training.
max_train_steps = 1073741824
pretrained_model_name_or_path = "******"
reg_data_dir = "******"
# "width,height" string; must pair with the ARB bucket bounds above.
resolution = "720,720"
# Fixed seed for reproducibility of this run.
seed = 171927893
train_data_dir = "******"
# Pre-encode images to VAE latents once and reuse them (cached to disk),
# trading disk space for per-step VAE encode time.
[Cache_latents]
cache_latents = true
vae_batch_size = 1
cache_latents_to_disk = true
# Caption/tag handling for the training set.
[Captions]
# Randomize tag order each step; keep_tokens = 1 pins the first tag
# (typically the trigger word) in place while the rest shuffle.
shuffle_caption = true
caption_extension = ".txt"
keep_tokens = 1
# Drop the whole caption 5% of the time (unconditional training signal);
# per-tag dropout is disabled.
caption_dropout_rate = 0.05
caption_dropout_every_n_epochs = 0
caption_tag_dropout_rate = 0.0
# NOTE(review): 150 presumably means two 75-token CLIP chunks — confirm
# the trainer accepts exactly this value (75/150/225 are the usual set).
max_token_length = 150
weighted_captions = false
token_warmup_min = 1
token_warmup_step = 0
# All pixel-space augmentations disabled — consistent with latent caching
# above, which presumably requires color_aug/random_crop off (cached
# latents cannot reflect per-step pixel transforms); verify flip_aug too.
[Data_augmentation]
color_aug = false
flip_aug = false
random_crop = false
# DataLoader behavior: 8 worker processes, kept alive across epochs.
[Dataset]
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
dataset_repeats = 1
# Dataset debug mode off (would preview batches instead of training).
[Debugging]
debug_dataset = false
# Legacy flags kept for compatibility; optimizer choice now lives in
# [Optimizer] (optimizer_type).
[Deprecated]
use_8bit_adam = false
use_lion_optimizer = false
# NOTE(review): this global learning_rate (2e-4) differs from the
# per-network unet_lr/text_encoder_lr (6e-4) in [Optimizer]; presumably
# the per-network values take precedence — confirm which one wins.
learning_rate = 0.0002
# Loss/noise shaping extras.
[Further_improvement]
# 0 presumably disables min-SNR loss weighting — confirm (common enabled
# values are ~5).
min_snr_gamma = 0
# Multi-resolution (pyramid) noise: 6 iterations, discount 0.3.
multires_noise_discount = 0.3
multires_noise_iterations = 6
# Hugging Face Hub integration fully disabled for this run.
[Huggingface]
save_state_to_huggingface = false
resume_from_huggingface = false
async_upload = false
# TensorBoard logging; run names are prefixed "lora_". Directory redacted.
[Logging]
logging_dir = "******"
log_with = "tensorboard"
log_prefix = "lora_"
# Constant learning rate, no warmup.
[Lr_scheduler]
lr_scheduler_type = ""
lr_scheduler = "constant"
# NOTE(review): with a constant scheduler the cycle/power keys below are
# presumably inert (they apply to cosine_with_restarts / polynomial) —
# kept as harmless defaults.
lr_warmup_steps = 0
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1.0
# Use the LyCORIS network implementation: plain LoRA algorithm restricted
# to attention + MLP layers via the "attn-mlp" preset.
[LyCORIS]
network_module = "lycoris.kohya"
network_args = [ "preset=attn-mlp", "algo=lora",]
# LoRA network shape and which sub-networks train.
[Network_setup]
dim_from_weights = false
# Small network: rank 4 with alpha 2 (alpha/dim = 0.5 is presumably the
# effective weight scale — confirm against the lycoris implementation).
network_alpha = 2
network_dim = 4
network_dropout = 0
# Train the U-Net only; the text encoder is frozen.
network_train_text_encoder_only = false
network_train_unet_only = true
resume = false
# Optimizer and batch settings.
[Optimizer]
gradient_accumulation_steps = 1
# Trades compute for memory; effective batch = train_batch_size (8).
gradient_checkpointing = true
max_grad_norm = 1.0
optimizer_args = [ "weight_decay=0.1", "betas=0.9,0.99",]
# 8-bit AdamW (bitsandbytes) to cut optimizer-state memory.
optimizer_type = "AdamW8bit"
# NOTE(review): text_encoder_lr is presumably unused because
# network_train_unet_only = true in [Network_setup] — harmless, but
# confirm it is ignored rather than re-enabling TE training.
text_encoder_lr = 0.0006
train_batch_size = 8
unet_lr = 0.0006
# Miscellaneous: provenance note embedded into the saved model metadata.
[Others]
lowram = false
training_comment = "narugo1992's automated LoRA training, based on nebulae's config."
# Weight of the loss on regularization images (reg_data_dir in [Basics]).
[Regularization]
prior_loss_weight = 1.0
# Stable Diffusion 2.x switches — all off, so the base model is treated
# as SD1.x (consistent with clip_skip = 2 in [Basics]).
[SDv2]
v2 = false
v_parameterization = false
scale_v_pred_loss_like_noise_pred = false
# Sampler used for in-training preview images (no prompt/interval keys
# visible here, so previews may effectively be disabled — verify).
[Sampling_during_training]
sample_sampler = "ddim"
# Checkpointing: fp16 safetensors every 2 epochs. Output dir redacted.
[Save]
output_dir = "******"
output_name = "vigna_arknights"
save_every_n_epochs = 2
# 2^30 step sentinel — same trick as max_train_steps: step-based saving
# is effectively disabled, leaving only the epoch-based schedule above.
save_every_n_steps = 1073741824
# Retention: keep only checkpoints from the last 200 steps / 1 state —
# NOTE(review): confirm semantics; these keys prune older step-saves.
save_last_n_steps = 200
save_last_n_steps_state = 1
save_model_as = "safetensors"
save_precision = "fp16"
save_state = false
# fp16 mixed precision; weights stay fp32 (no full_fp16/full_bf16).
# NOTE(review): header misspells "precision" ("preciscion"); kept
# byte-identical in case a consumer looks this section up by name —
# sd-scripts-style loaders presumably flatten sections, but confirm
# before renaming.
[Training_preciscion]
mixed_precision = "fp16"
full_fp16 = false
full_bf16 = false