johnowhitaker
committed on
Commit
•
d7a843d
1
Parent(s):
9633e6a
Update app.py
Browse files
app.py
CHANGED
@@ -16,6 +16,7 @@ image_pipe = DDPMPipeline.from_pretrained(pipeline_name).to(device)
|
|
16 |
scheduler = DDIMScheduler.from_pretrained(pipeline_name)
|
17 |
scheduler.set_timesteps(num_inference_steps=20)
|
18 |
|
|
|
19 |
def color_loss(images, target_color=(0.1, 0.9, 0.5)):
|
20 |
"""Given a target color (R, G, B) return a loss for how far away on average
|
21 |
the images' pixels are from that color. Defaults to a light teal: (0.1, 0.9, 0.5) """
|
@@ -24,7 +25,7 @@ def color_loss(images, target_color=(0.1, 0.9, 0.5)):
|
|
24 |
error = torch.abs(images - target).mean() # Mean absolute difference between the image pixels and the target color
|
25 |
return error
|
26 |
|
27 |
-
|
28 |
def generate(color, guidance_loss_scale):
|
29 |
target_color = ImageColor.getcolor(color, "RGB") # Target color as RGB
|
30 |
target_color = [a/255 for a in target_color] # Rescale from (0, 255) to (0, 1)
|
@@ -45,20 +46,23 @@ def generate(color, guidance_loss_scale):
|
|
45 |
im.save('test.jpeg')
|
46 |
return im
|
47 |
|
|
|
48 |
inputs = [
|
49 |
gr.ColorPicker(label="color", value='55FFAA'), # Add any inputs you need here
|
50 |
gr.Slider(label="guidance_scale", minimum=0, maximum=30, value=3)
|
51 |
]
|
52 |
outputs = gr.Image(label="result")
|
53 |
|
|
|
54 |
demo = gr.Interface(
|
55 |
fn=generate,
|
56 |
inputs=inputs,
|
57 |
outputs=outputs,
|
58 |
examples=[
|
59 |
-
["#BB2266"],["#44CCAA"] # You can provide some example inputs to get people started
|
60 |
],
|
61 |
)
|
62 |
|
|
|
63 |
if __name__ == "__main__":
|
64 |
demo.launch(enable_queue=True)
|
|
|
16 |
scheduler = DDIMScheduler.from_pretrained(pipeline_name)
|
17 |
scheduler.set_timesteps(num_inference_steps=20)
|
18 |
|
19 |
+
# The guidance function
|
20 |
def color_loss(images, target_color=(0.1, 0.9, 0.5)):
|
21 |
"""Given a target color (R, G, B) return a loss for how far away on average
|
22 |
the images' pixels are from that color. Defaults to a light teal: (0.1, 0.9, 0.5) """
|
|
|
25 |
error = torch.abs(images - target).mean() # Mean absolute difference between the image pixels and the target color
|
26 |
return error
|
27 |
|
28 |
+
# And the core function to generate an image given the relevant inputs
|
29 |
def generate(color, guidance_loss_scale):
|
30 |
target_color = ImageColor.getcolor(color, "RGB") # Target color as RGB
|
31 |
target_color = [a/255 for a in target_color] # Rescale from (0, 255) to (0, 1)
|
|
|
46 |
im.save('test.jpeg')
|
47 |
return im
|
48 |
|
49 |
+
# See the gradio docs for the types of inputs and outputs available
|
50 |
inputs = [
|
51 |
gr.ColorPicker(label="color", value='55FFAA'), # Add any inputs you need here
|
52 |
gr.Slider(label="guidance_scale", minimum=0, maximum=30, value=3)
|
53 |
]
|
54 |
outputs = gr.Image(label="result")
|
55 |
|
56 |
+
# Setting up a minimal interface to our function:
|
57 |
demo = gr.Interface(
|
58 |
fn=generate,
|
59 |
inputs=inputs,
|
60 |
outputs=outputs,
|
61 |
examples=[
|
62 |
+
["#BB2266", 3],["#44CCAA", 5] # You can provide some example inputs to get people started
|
63 |
],
|
64 |
)
|
65 |
|
66 |
+
# And launching
|
67 |
if __name__ == "__main__":
|
68 |
demo.launch(enable_queue=True)
|