josdirksen committed · Commit ccd9455 · Parent(s): a8ad226

Upload ai-toolkit_config.yaml with huggingface_hub

ai-toolkit_config.yaml  ADDED  (+94 -0)
@@ -0,0 +1,94 @@
+---
+job: extension
+config:
+  # this name will be used for the output folder and file names
+  name: "my_first_flux_lora_v1"
+  process:
+    - type: 'sd_trainer'
+      # root folder to save training sessions/samples/weights
+      training_folder: "output"
+      # uncomment to see performance stats in the terminal every N steps
+
+      # performance_log_every: 1000
+      device: cuda:0
+      # if a trigger word is specified, it will be added to captions of training data if it does not already exist
+      # alternatively, in your captions you can add [trigger] and it will be replaced with the trigger word
+
+      # trigger_word: "p3r5on"
+      network:
+        type: "lora"
+        linear: 16
+        linear_alpha: 16
+      save:
+        dtype: float16 # precision to save
+        save_every: 500 # save every this many steps
+        max_step_saves_to_keep: 4 # how many intermittent saves to keep
+        push_to_hub: false # change this to true to push your trained model to Hugging Face.
+        # You can either set up an HF_TOKEN env variable or you'll be prompted to log in
+        # hf_repo_id: your-username/your-model-slug
+        # hf_private: true # whether the repo is private or public
+
+      datasets:
+        # datasets are a folder of images. captions need to be txt files with the same name as the image
+        # for instance image2.jpg and image2.txt. Only jpg, jpeg, and png are supported currently
+        # images will automatically be resized and bucketed into the resolution specified
+        # on windows, escape back slashes with another backslash so
+        # "C:\\path\\to\\images\\folder"
+        - folder_path: "/workspace/ai-toolkit/images"
+          caption_ext: "txt"
+          caption_dropout_rate: 0.05 # will drop out the caption 5% of the time
+          shuffle_tokens: false # shuffle caption order, split by commas
+          cache_latents_to_disk: true # leave this true unless you know what you're doing
+          resolution: [512, 768, 1024] # flux enjoys multiple resolutions
+      train:
+        batch_size: 1
+        steps: 1000 # total number of steps to train; 500 - 4000 is a good range
+        gradient_accumulation_steps: 1
+        train_unet: true
+        train_text_encoder: false # probably won't work with flux
+        gradient_checkpointing: true # need this on unless you have a ton of vram
+        noise_scheduler: "flowmatch" # for training only
+        optimizer: "adamw8bit"
+        lr: 0.0004
+        # skip_first_sample: true
+        # uncomment to completely disable sampling
+        # disable_sampling: true
+        # uncomment to use new bell curved weighting. Experimental but may produce better results
+        # linear_timesteps: true
+
+        # ema will smooth out learning, but could slow it down. Recommended to leave on.
+        ema_config:
+          use_ema: true
+          ema_decay: 0.99
+        # will probably need this if gpu supports it for flux, other dtypes may not work correctly
+        dtype: bf16
+      model:
+        # huggingface model name or path
+        name_or_path: "black-forest-labs/FLUX.1-dev"
+        is_flux: true
+        quantize: true # run 8bit mixed precision
+        # low_vram: true # uncomment this if the GPU is connected to your monitors. It will use less vram to quantize, but is slower.
+      sample:
+        sampler: "flowmatch" # must match train.noise_scheduler
+        sample_every: 500 # sample every this many steps
+        width: 1024
+        height: 1024
+        prompts:
+          - Photo of elinelora holding a sign that says 'I LOVE PROMPTS!'
+          - Professional headshot of elinelora in a business suit.
+          - A happy pilot elinelora of a Boeing 747.
+          - A doctor elinelora talking to a patient.
+          - A chef elinelora in the middle of a bustling kitchen, plating a beautifully arranged dish.
+          - A young elinelora with a big grin, holding a large ice cream cone in front of an old-fashioned ice cream parlor.
+          - A person elinelora in a tuxedo, looking directly into the camera with a confident smile, standing on a red carpet at a gala event.
+          - Person elinelora with a bitchin' 80's mullet hairstyle leaning out the window of a pontiac firebird
+        neg: "" # not used on flux
+        seed: 42
+        walk_seed: true
+        guidance_scale: 4
+        sample_steps: 20
+      trigger_word: elinelora
+# you can add any additional meta info here. [name] is replaced with config name at top
+meta:
+  name: "[name]"
+  version: '1.0'
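
Usage note: training with this config is typically launched by pointing ai-toolkit's run script at the file (e.g. python run.py ai-toolkit_config.yaml from an ai-toolkit checkout). Below is a minimal, hedged sketch of how the resulting LoRA could be loaded for a quick test render with diffusers; the output directory and weight file name are assumptions derived from the training_folder and name fields above, and the prompt, guidance scale, step count and seed simply mirror the sample block.

# Minimal inference sketch (assumed output layout, not part of the uploaded file).
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
pipe.load_lora_weights(
    "output/my_first_flux_lora_v1",                    # training_folder / name (assumed layout)
    weight_name="my_first_flux_lora_v1.safetensors",   # assumed checkpoint file name
)
pipe.enable_model_cpu_offload()  # reduces VRAM use; optional on large GPUs

image = pipe(
    "Photo of elinelora holding a sign that says 'I LOVE PROMPTS!'",
    width=1024,
    height=1024,
    guidance_scale=4.0,       # matches sample.guidance_scale
    num_inference_steps=20,   # matches sample.sample_steps
    generator=torch.Generator("cpu").manual_seed(42),  # matches sample.seed
).images[0]
image.save("elinelora_test.png")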