Commit 6f417f5
Mateo Fidabel committed
Parent(s): e6915e1

Added more examples, added about info
Files changed:
- app.py +33 -6
- examples/condition_image_4.png +0 -0
- examples/condition_image_5.png +0 -0
- examples/condition_image_6.png +0 -0
- examples/condition_image_7.png +0 -0
app.py
CHANGED
@@ -26,16 +26,39 @@ p_params = replicate(params)
 title = "# 🧨 ControlNet on Segment Anything 🤗"
 description = """This is a demo on 🧨 ControlNet based on Meta's [Segment Anything Model](https://segment-anything.com/).
 
-Upload a Segment Anything Segmentation Map, write a prompt, and generate images 🤗 This demo is still Work in Progress, so don't expect it to work well for now !!
+Upload a Segment Anything segmentation map, write a prompt, and generate images 🤗 This demo is still a work in progress, so don't expect it to work well for now!
 
-
-Test some of the examples below to give it a try ⬇️
+⌛️ It takes about 30 seconds to generate 4 samples; to get faster results, don't forget to reduce the Nº Samples to 1.
 """
 
+about = """
+
+
+# 👨‍💻 About the model
+
+This model is based on the [ControlNet Model](https://huggingface.co/blog/controlnet), which allows us to generate images conditioned on an extra input image. For this model, we selected the segmentation maps produced by Meta's new [Segment Anything Model](https://github.com/facebookresearch/segment-anything) as the condition image. We then trained the model to generate images that follow the structure of the segmentation maps and the given text prompts.
+
+
+# 💾 About the dataset
+
+For training, we generated a segmented dataset based on the [COYO-700M](https://huggingface.co/datasets/kakaobrain/coyo-700m) dataset, which provided the images and the text prompts. The segmentation maps were produced with the [Segment Anything Model](https://github.com/facebookresearch/segment-anything). We then created 8k samples to train our model on, which isn't a lot, but time constraints made it challenging to generate a larger dataset. Despite these constraints, we still managed to achieve some nice results 🙌
+
+You can check the generated datasets below ⬇️
+- [sam-coyo-2k](https://huggingface.co/datasets/mfidabel/sam-coyo-2k)
+- [sam-coyo-2.5k](https://huggingface.co/datasets/mfidabel/sam-coyo-2.5k)
+- [sam-coyo-3k](https://huggingface.co/datasets/mfidabel/sam-coyo-3k)
+
+"""
+
 examples = [["contemporary living room of a house", "low quality", "examples/condition_image_1.png"],
             ["new york buildings, Vincent Van Gogh starry night ", "low quality, monochrome", "examples/condition_image_2.png"],
-            ["contemporary living room, high quality, 4k, realistic", "low quality, monochrome, low res", "examples/condition_image_3.png"]]
+            ["contemporary living room, high quality, 4k, realistic", "low quality, monochrome, low res", "examples/condition_image_3.png"],
+            ["internal stairs of a japanese house", "low quality, low res, people, kids", "examples/condition_image_4.png"],
+            ["a photo of a girl taking notes", "low quality, low res, painting", "examples/condition_image_5.png"],
+            ["painting of a hot air balloon flying over a valley, The Great Wave off Kanagawa style, blue and white colors", "low quality, low res", "examples/condition_image_6.png"],
+            ["painting of families enjoying the sunset, The Garden of Earthly Delights style, joyful", "low quality, low res", "examples/condition_image_7.png"]]
 
+css = "h1 { text-align: center } .about { text-align: justify; padding-left: 10%; padding-right: 10%; }"
 
 # Inference Function
 def infer(prompts, negative_prompts, image, num_inference_steps = 50, seed = 4, num_samples = 4):
@@ -93,7 +116,7 @@ prompt = gr.Textbox(lines=1, label="Prompt", value=default_example[0])
 negative_prompt = gr.Textbox(lines=1, label="Negative Prompt", value=default_example[1])
 
 
-with gr.Blocks(css="h1 { text-align: center }") as demo:
+with gr.Blocks(css=css) as demo:
     with gr.Row():
         with gr.Column():
            # Title
@@ -103,10 +126,12 @@ with gr.Blocks(css="h1 { text-align: center }") as demo:
 
         with gr.Column():
            # Examples
+           gr.Markdown("Try some of the examples below ⬇️")
            gr.Examples(examples=examples,
                        inputs=[prompt, negative_prompt, cond_img],
                        outputs=output,
-                       fn=infer)
+                       fn=infer,
+                       examples_per_page=4)
 
     # Images
     with gr.Row(variant="panel"):
@@ -130,6 +155,8 @@ with gr.Blocks(css="h1 { text-align: center }") as demo:
         submit = gr.Button("Generate")
         # TODO: Download Button
 
+    with gr.Row():
+        gr.Markdown(about, elem_classes="about")
 
     submit.click(infer,
                  inputs=[prompt, negative_prompt, cond_img, num_steps, seed, num_samples],
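The diff shows the signature of `infer` and the `p_params = replicate(params)` hunk context, but not the function body, which is unchanged by this commit. For orientation only, here is a minimal sketch of what a Flax ControlNet inference path with replicated parameters can look like, following diffusers' documented JAX ControlNet usage. The ControlNet checkpoint id, the base Stable Diffusion repo, and the function body are assumptions, not the app's actual code.

```python
# Hypothetical sketch, not the app's actual implementation. Checkpoint ids are placeholders.
import jax
import jax.numpy as jnp
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline

# Load a ControlNet trained on SAM segmentation maps (placeholder repo id)
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
    "your-org/controlnet-sam-placeholder", dtype=jnp.float32
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32
)
params["controlnet"] = controlnet_params
p_params = replicate(params)  # replicate weights across local devices, as in the app


def infer(prompts, negative_prompts, image, num_inference_steps=50, seed=4, num_samples=4):
    # Assumes num_samples is a multiple of jax.device_count(), since inputs are sharded per device
    rng = jax.random.split(jax.random.PRNGKey(seed), jax.device_count())

    # Tokenize the prompts and preprocess the condition image, one copy per sample
    prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
    negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples)
    processed_image = pipe.prepare_image_inputs([image] * num_samples)

    # Shard the inputs across devices and run the pmapped pipeline
    output = pipe(
        prompt_ids=shard(prompt_ids),
        image=shard(processed_image),
        params=p_params,
        prng_seed=rng,
        num_inference_steps=num_inference_steps,
        neg_prompt_ids=shard(negative_prompt_ids),
        jit=True,
    ).images

    # Collapse the device axis and convert to PIL images for Gradio
    output = np.asarray(output.reshape((num_samples,) + output.shape[-3:]))
    return pipe.numpy_to_pil(output)
```

With `jit=True` the pipeline runs a pmapped sampling loop, which is why the inputs are sharded across devices and the parameters replicated before the call.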
examples/condition_image_4.png
ADDED
examples/condition_image_5.png
ADDED
examples/condition_image_6.png
ADDED
examples/condition_image_7.png
ADDED
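The four newly added condition images are SAM segmentation maps of the kind the `about` text describes. As an illustration only, such a map could be produced with Meta's `segment_anything` package roughly as follows; the checkpoint path and the random per-mask coloring are assumptions, not the authors' actual preprocessing script.

```python
# Hypothetical sketch of producing a SAM segmentation map like examples/condition_image_*.png
import numpy as np
import cv2
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator

# Load a SAM checkpoint (placeholder path)
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
mask_generator = SamAutomaticMaskGenerator(sam)


def to_condition_image(image_path: str) -> np.ndarray:
    """Segment an image with SAM and paint each mask a random color."""
    image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
    masks = mask_generator.generate(image)  # list of dicts with a boolean "segmentation" array

    # Draw larger masks first so smaller segments stay visible on top
    condition = np.zeros_like(image)
    for mask in sorted(masks, key=lambda m: m["area"], reverse=True):
        condition[mask["segmentation"]] = np.random.randint(0, 256, size=3)
    return condition


cv2.imwrite("condition_image.png", cv2.cvtColor(to_condition_image("input.jpg"), cv2.COLOR_RGB2BGR))
```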