NebulaeWis committed on
Commit 29b561d
1 Parent(s): 8a992fe

Upload surtr_arknights.toml

Files changed (1)
  1. surtr_arknights.toml +130 -0
surtr_arknights.toml ADDED
@@ -0,0 +1,130 @@
+ [Basics]
+ pretrained_model_name_or_path = "D:\\stable-diffusion-webui\\models\\Stable-diffusion\\animefull-latest.ckpt"
+ train_data_dir = "D:\\train_data\\surtr_arknights"
+ resolution = "512,768"
+ seed = 23
+ max_train_steps = 99999
+ max_train_epochs = 24
+ clip_skip = 2
+
+ [Save]
+ output_dir = "D:\\train_out\\lora"
+ output_name = "surtr_arknights"
+ save_precision = "fp16"
+ save_model_as = "safetensors"
+ save_every_n_epochs = 2
+ save_every_n_steps = 9999
+ save_state = false
+ save_last_n_steps_state = 1
+ save_last_n_steps = 200
+
+ [SDv2]
+ v2 = false
+ v_parameterization = false
+ scale_v_pred_loss_like_noise_pred = false
+
+ [Network_setup]
+ network_dim = 4
+ network_alpha = 2
+ dim_from_weights = false
+ network_dropout = 0
+ network_train_unet_only = true
+ network_train_text_encoder_only = false
+ resume = false
+
+ [LyCORIS]
+ network_module = "lycoris.kohya"
+ network_args = [ "preset=attn-mlp", "algo=lora",]
+
+ [Optimizer]
+ train_batch_size = 8
+ gradient_checkpointing = true
+ gradient_accumulation_steps = 1
+ optimizer_type = "AdamW8bit"
+ unet_lr = 0.0006
+ text_encoder_lr = 0.0006
+ max_grad_norm = 1.0
+ optimizer_args = [ "weight_decay=0.1", "betas=0.9,0.99",]
+
+ [Lr_scheduler]
+ lr_scheduler_type = ""
+ lr_scheduler = "constant"
+ lr_warmup_steps = 0
+ lr_scheduler_num_cycles = 1
+ lr_scheduler_power = 1.0
+
+ [Training_precision]
+ mixed_precision = "fp16"
+ full_fp16 = false
+ full_bf16 = false
+
+ [Further_improvement]
+ min_snr_gamma = 0
+ multires_noise_discount = 0.3
+ multires_noise_iterations = 6
+
+ [ARB]
+ enable_bucket = true
+ min_bucket_reso = 320
+ max_bucket_reso = 960
+ bucket_reso_steps = 64
+ bucket_no_upscale = false
+
+ [Captions]
+ shuffle_caption = true
+ caption_extension = ".txt"
+ keep_tokens = 1
+ caption_dropout_rate = 0.05
+ caption_dropout_every_n_epochs = 0
+ caption_tag_dropout_rate = 0.0
+ max_token_length = 150
+ weighted_captions = false
+ token_warmup_min = 1
+ token_warmup_step = 0
+
+ [Attention]
+ mem_eff_attn = false
+ xformers = true
+
+ [Data_augmentation]
+ color_aug = false
+ flip_aug = false
+ random_crop = false
+
+ [Cache_latents]
+ cache_latents = true
+ vae_batch_size = 1
+ cache_latents_to_disk = true
+
+ [Sampling_during_training]
+ sample_sampler = "ddim"
+
+ [Logging]
+ logging_dir = "logs_training"
+ log_with = "tensorboard"
+ log_prefix = "lora_"
+
+ [Dataset]
+ max_data_loader_n_workers = 8
+ persistent_data_loader_workers = true
+ dataset_repeats = 1
+
+ [Regularization]
+ prior_loss_weight = 1.0
+
+ [Huggingface]
+ save_state_to_huggingface = false
+ resume_from_huggingface = false
+ async_upload = false
+
+ [Debugging]
+ debug_dataset = false
+
+ [Deprecated]
+ use_8bit_adam = false
+ use_lion_optimizer = false
+ learning_rate = 0.0002
+
+ [Others]
+ lowram = false
+ training_comment = "nebulae"
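
The sectioned layout above matches the TOML config format that kohya-ss sd-scripts reads through its --config_file option: section headers such as [Basics] and [Save] are organizational only, and every key is lifted out into one flat namespace of train_network.py option names. A minimal sketch of that flattening, assuming Python 3.11+ for the stdlib tomllib module and this file saved locally (this is an illustration, not the project's actual loader):

import tomllib

# Load the training config; section names ([Basics], [Save], ...) are
# purely organizational, so an sd-scripts-style loader lifts every key
# into a single flat namespace of option names.
with open("surtr_arknights.toml", "rb") as f:
    config = tomllib.load(f)

flat = {}
for section, values in config.items():
    if isinstance(values, dict):
        flat.update(values)      # lift keys out of their section
    else:
        flat[section] = values   # keep any top-level keys as-is

print(flat["network_dim"], flat["network_alpha"])  # -> 4 2

Training would then typically be launched with something like accelerate launch train_network.py --config_file=surtr_arknights.toml; the exact entry point and the precedence between file values and explicit command-line flags depend on the sd-scripts version, so treat that invocation as an assumption rather than a verified command.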