# QQQRINKAPPP_V1 — training_config.toml
# LoRA training configuration (kohya-ss sd-scripts config-file format).
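
# --- LoRA network ---
# network_dim is the LoRA rank; network_alpha scales the learned weights
# (effective scale = network_alpha / network_dim, so alpha = 1 at dim 128
# is a very conservative scale). Separate learning rates are set for the
# UNet and text-encoder adapters.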
[additional_network_arguments]
unet_lr = 0.0002
text_encoder_lr = 0.0001
network_dim = 128
network_alpha = 1
network_module = "networks.lora"
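
# --- Optimizer and LR schedule ---
# 8-bit AdamW (bitsandbytes) with a cosine-with-restarts schedule over
# 3 cycles and a 268-step warmup. learning_rate is the base rate; the
# per-module unet_lr / text_encoder_lr values above take precedence.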
[optimizer_arguments]
learning_rate = 0.0002
lr_scheduler = "cosine_with_restarts"
lr_scheduler_num_cycles = 3
lr_warmup_steps = 268
optimizer_type = "AdamW8bit"
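
# --- Training loop ---
# 20 epochs at batch size 6, saving a checkpoint every epoch and keeping
# all 20. clip_skip = 2 uses the penultimate CLIP layer (common with
# anime-trained SD 1.x bases); min_snr_gamma = 5.0 enables Min-SNR loss
# weighting; max_token_length = 225 extends the prompt token limit.
# fp16 mixed precision plus xformers attention keeps VRAM usage low.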
[training_arguments]
max_train_epochs = 20
save_every_n_epochs = 1
save_last_n_epochs = 20
train_batch_size = 6
clip_skip = 2
min_snr_gamma = 5.0
weighted_captions = false
seed = 42
max_token_length = 225
xformers = true
lowram = true
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
save_precision = "fp16"
mixed_precision = "fp16"
output_dir = "/content/drive/MyDrive/Loras/QQQRINKAPPP_V1/output"
logging_dir = "/content/drive/MyDrive/Loras/_logs"
output_name = "QQQRINKAPPP_V1"
log_prefix = "QQQRINKAPPP_V1"
save_state = false
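
# --- Base model ---
# SD 1.x checkpoint (v2 = false) used as the starting point for the LoRA.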
[model_arguments]
pretrained_model_name_or_path = "/content/animefull-final-pruned-fp16.safetensors"
v2 = false
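
# --- Output format ---
# Save the trained LoRA weights as a .safetensors file.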
[saving_arguments]
save_model_as = "safetensors"
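
# --- DreamBooth-style loss ---
# prior_loss_weight weights the loss on regularization images (1.0 is the
# default); it has no effect if no regularization set is provided.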
[dreambooth_arguments]
prior_loss_weight = 1.0
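
# --- Dataset ---
# cache_latents pre-encodes training images to VAE latents once and reuses
# them, reducing VRAM use and per-step compute.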
[dataset_arguments]
cache_latents = true