Commit 44ac339
Parent(s): 8a48df3

Update app.py
app.py CHANGED
@@ -34,11 +34,11 @@ model = Blip2ForConditionalGeneration.from_pretrained(
 
 training_option_settings = {
     "face": {
-        "rank":
+        "rank": 32,
         "lr_scheduler": "constant",
         "with_prior_preservation": True,
         "class_prompt": "a photo of a person",
-        "train_steps_multiplier":
+        "train_steps_multiplier": 150,
         "file_count": 150,
         "dataset_path": FACES_DATASET_PATH
     },
@@ -49,12 +49,19 @@ training_option_settings = {
         "class_prompt": "",
         "train_steps_multiplier": 150
     },
+    "character": {
+        "rank": 32,
+        "lr_scheduler": "constant",
+        "with_prior_preservation": False,
+        "class_prompt": "",
+        "train_steps_multiplier": 200
+    },
     "object": {
-        "rank":
+        "rank": 16,
         "lr_scheduler": "constant",
         "with_prior_preservation": False,
         "class_prompt": "",
-        "train_steps_multiplier":
+        "train_steps_multiplier": 50
     },
     "custom": {
         "rank": 32,
@@ -69,7 +76,7 @@ num_images_settings = {
     #>24 images, 1 repeat; 10<x<24 images 2 repeats; <10 images 3 repeats
     "repeats": [(24, 1), (10, 2), (0, 3)],
     "train_steps_min": 500,
-    "train_steps_max":
+    "train_steps_max": 1500
 }
 
 def load_captioning(uploaded_images, option):
@@ -106,6 +113,8 @@ def make_options_visible(option):
         sentence = "A photo of TOK"
     elif option == "style":
         sentence = "in the style of TOK"
+    elif option == "character":
+        sentence = "A TOK character"
     elif option == "custom":
         sentence = "TOK"
     return (
@@ -522,7 +531,7 @@ with gr.Blocks(css=css, theme=theme) as demo:
     with gr.Column(elem_classes=["main_unlogged"]) as main_ui:
         lora_name = gr.Textbox(label="The name of your LoRA", info="This has to be a unique name", placeholder="e.g.: Persian Miniature Painting style, Cat Toy")
         training_option = gr.Radio(
-            label="What are you training?", choices=["object", "style", "face", "custom"]
+            label="What are you training?", choices=["object", "style", "character", "face", "custom"]
         )
         concept_sentence = gr.Textbox(
             label="Concept sentence",