# -*- coding: utf-8 -*-
"""Copy of compose_glide.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/19xx6Nu4FeiGj-TzTUFxBf-15IkeuFx_F
"""
import streamlit as st
import gradio as gr
import torch as th
from composable_diffusion.download import download_model
from composable_diffusion.model_creation import create_model_and_diffusion as create_model_and_diffusion_for_clevr
from composable_diffusion.model_creation import model_and_diffusion_defaults as model_and_diffusion_defaults_for_clevr
from composable_diffusion.composable_stable_diffusion.pipeline_composable_stable_diffusion import ComposableStableDiffusionPipeline
# This notebook supports both CPU and GPU.
# On CPU, generating one sample may take on the order of 20 minutes.
# On a GPU, it should be under a minute.
has_cuda = th.cuda.is_available()
device = th.device('cuda' if has_cuda else 'cpu')
# init stable diffusion model
pipe = ComposableStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    use_auth_token=st.secrets["USER_TOKEN"]
).to(device)
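# disable the pipeline's built-in safety checker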
pipe.safety_checker = None
# create model for CLEVR Objects
clevr_options = model_and_diffusion_defaults_for_clevr()
flags = {
    "image_size": 128,
    "num_channels": 192,
    "num_res_blocks": 2,
    "learn_sigma": True,
    "use_scale_shift_norm": False,
    "raw_unet": True,
    "noise_schedule": "squaredcos_cap_v2",
    "rescale_learned_sigmas": False,
    "rescale_timesteps": False,
    "num_classes": '2',
    "dataset": "clevr_pos",
    "use_fp16": has_cuda,
    "timestep_respacing": '100'
}
for key, val in flags.items():
    clevr_options[key] = val
clevr_model, clevr_diffusion = create_model_and_diffusion_for_clevr(**clevr_options)
clevr_model.eval()
if has_cuda:
    clevr_model.convert_to_fp16()
clevr_model.to(device)
clevr_model.load_state_dict(th.load(download_model('clevr_pos'), device))
print('total clevr_pos parameters', sum(x.numel() for x in clevr_model.parameters()))
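# Compose a CLEVR scene from multiple "x, y" object positions: each position is
# scored separately and the scores are combined against an unconditional score,
#   eps = eps_uncond + sum_i w_i * (eps_cond_i - eps_uncond)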
def compose_clevr_objects(prompt, weights, steps):
    # parse per-position guidance weights, e.g. "7.5 | 7.5"
    weights = [float(x.strip()) for x in weights.split('|')]
    weights = th.tensor(weights, device=device).reshape(-1, 1, 1, 1)
    # parse "x, y" coordinate pairs, e.g. "0.1, 0.5 | 0.3, 0.5"
    coordinates = [
        [float(x.split(',')[0].strip()), float(x.split(',')[1].strip())]
        for x in prompt.split('|')
    ]
    coordinates += [[-1, -1]]  # add unconditional score label
    batch_size = 1
    clevr_options['timestep_respacing'] = str(int(steps))
    _, clevr_diffusion = create_model_and_diffusion_for_clevr(**clevr_options)

    def model_fn(x_t, ts, **kwargs):
        # replicate the single sample once per label and score the whole batch
        half = x_t[:1]
        combined = th.cat([half] * kwargs['y'].size(0), dim=0)
        model_out = clevr_model(combined, ts, **kwargs)
        eps, rest = model_out[:, :3], model_out[:, 3:]
        # split conditional scores from the unconditional one and recombine
        masks = kwargs.get('masks')
        cond_eps = eps[masks]
        uncond_eps = eps[~masks]
        half_eps = uncond_eps + (weights * (cond_eps - uncond_eps)).sum(dim=0, keepdim=True)
        eps = th.cat([half_eps] * x_t.size(0), dim=0)
        return th.cat([eps, rest], dim=1)

    def sample(coordinates):
        # every label is conditional except the trailing [-1, -1] one
        masks = [True] * (len(coordinates) - 1) + [False]
        model_kwargs = dict(
            y=th.tensor(coordinates, dtype=th.float, device=device),
            masks=th.tensor(masks, dtype=th.bool, device=device)
        )
        samples = clevr_diffusion.p_sample_loop(
            model_fn,
            (len(coordinates), 3, clevr_options["image_size"], clevr_options["image_size"]),
            device=device,
            clip_denoised=True,
            progress=True,
            model_kwargs=model_kwargs,
            cond_fn=None,
        )[:batch_size]
        return samples

    samples = sample(coordinates)
    # map the sample from [-1, 1] to a uint8 HWC image
    out_img = samples[0].permute(1, 2, 0)
    out_img = (out_img + 1) / 2
    out_img = (out_img.detach().cpu() * 255.).to(th.uint8)
    out_img = out_img.numpy()
    return out_img
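# Stable Diffusion branch: the composable pipeline splits the prompt on '|' and
# weights each sub-prompt (a negative weight acts as a negative prompt).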
def stable_diffusion_compose(prompt, steps, weights, seed):
    # seed the generator on the active device so the demo also runs on CPU-only machines
    generator = th.Generator(device.type).manual_seed(int(seed))
    image = pipe(prompt, guidance_scale=7.5, num_inference_steps=steps,
                 weights=weights, generator=generator).images[0]
    image.save(f'{"_".join(prompt.split())}.png')
    return image
def compose(prompt, weights, version, steps, seed):
    try:
        with th.no_grad():
            if version == 'Stable_Diffusion_1v_4':
                return stable_diffusion_compose(prompt, steps, weights, seed)
            else:
                return compose_clevr_objects(prompt, weights, steps)
    except Exception as e:
        print(e)
        return None
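# demo examples: each row is [prompt, weights, version, steps, seed]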
examples_1 = "A castle in a forest | grainy, fog"
examples_3 = '0.1, 0.5 | 0.3, 0.5 | 0.5, 0.5 | 0.7, 0.5 | 0.9, 0.5'
examples_5 = 'a white church | lightning in the background'
examples_6 = 'mystical trees | A dark magical pond | dark'
examples_7 = 'A lake | A mountain | Cherry Blossoms next to the lake'
examples = [
    [examples_6, "7.5 | 7.5 | -7.5", 'Stable_Diffusion_1v_4', 50, 8],
    [examples_6, "7.5 | 7.5 | 7.5", 'Stable_Diffusion_1v_4', 50, 8],
    [examples_1, "7.5 | -7.5", 'Stable_Diffusion_1v_4', 50, 0],
    [examples_7, "7.5 | 7.5 | 7.5", 'Stable_Diffusion_1v_4', 50, 3],
    [examples_5, "7.5 | 7.5", 'Stable_Diffusion_1v_4', 50, 0],
    [examples_3, "7.5 | 7.5 | 7.5 | 7.5 | 7.5", 'CLEVR Objects', 100, 0]
]
title = 'Compositional Visual Generation with Composable Diffusion Models'
description = '<p>Our conjunction and negation (a.k.a. negative prompts) operators have also been added to the stable diffusion webui! (<a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Negative-prompt">Negation</a> and <a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/c26732fbee2a57e621ac22bf70decf7496daa4cd">Conjunction</a>)</p><p>See more information on our <a href="https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/">Project Page</a>.</p><ul><li>One version is based on the released <a href="https://github.com/openai/glide-text2im">GLIDE</a> and <a href="https://github.com/CompVis/stable-diffusion/">Stable Diffusion</a> for composing natural language descriptions.</li><li>Another is based on our pre-trained CLEVR Object Model for composing objects. <br>(<b>Note</b>: we recommend using <b><i>x</i></b> in the range <b><i>[0.1, 0.9]</i></b> and <b><i>y</i></b> in the range <b><i>[0.25, 0.7]</i></b>, since the training dataset labels fall within these ranges.)</li></ul><p>When composing multiple sentences, use `|` as the delimiter; see the examples below.</p><p>You can also specify the weight of each prompt using `|` as the delimiter. A negative weight applies the Negation operator (NOT), marking the corresponding prompt as a negative prompt; otherwise the Conjunction operator (AND) is used.</p><p><b>Only the Conjunction operator is enabled for CLEVR Objects.</b></p><p><b>Note: when using Stable Diffusion, black images will be returned if the given prompt is detected as problematic. For composing with the GLIDE model, we recommend using the Colab demo on our <a href="https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/">Project Page</a>.</b></p>'
iface = gr.Interface(
    compose,
    inputs=[
        gr.Textbox(label='prompt', value='mystical trees | A dark magical pond | dark'),
        gr.Textbox(label='weights', value='7.5 | 7.5 | -7.5'),
        gr.Radio(['Stable_Diffusion_1v_4', 'CLEVR Objects'], type="value", label='version', value='Stable_Diffusion_1v_4'),
        gr.Slider(10, 200, value=50, label='steps'),
        gr.Number(2, label='seed')
    ],
    outputs='image',
    cache_examples=False,
    title=title,
    description=description,
    examples=examples
)
iface.launch()
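# Example of calling the handler directly (bypassing the UI), using the first
# row from `examples` above:
#   compose('mystical trees | A dark magical pond | dark',
#           '7.5 | 7.5 | -7.5', 'Stable_Diffusion_1v_4', 50, 8)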