# -*- coding: utf-8 -*-
"""Copy of compose_glide.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/19xx6Nu4FeiGj-TzTUFxBf-15IkeuFx_F
"""
import streamlit as st
import gradio as gr
import torch as th
from composable_diffusion.download import download_model
from composable_diffusion.model_creation import create_model_and_diffusion as create_model_and_diffusion_for_clevr
from composable_diffusion.model_creation import model_and_diffusion_defaults as model_and_diffusion_defaults_for_clevr
from torch import autocast
from diffusers import StableDiffusionPipeline
# This notebook supports both CPU and GPU.
# On CPU, generating one sample may take on the order of 20 minutes.
# On a GPU, it should be under a minute.
has_cuda = th.cuda.is_available()
device = th.device('cuda' if has_cuda else 'cpu')
print(device)
# init stable diffusion model
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    use_auth_token=st.secrets["USER_TOKEN"]
).to(device)
# create model for CLEVR Objects
clevr_options = model_and_diffusion_defaults_for_clevr()
flags = {
    "image_size": 128,
    "num_channels": 192,
    "num_res_blocks": 2,
    "learn_sigma": True,
    "use_scale_shift_norm": False,
    "raw_unet": True,
    "noise_schedule": "squaredcos_cap_v2",
    "rescale_learned_sigmas": False,
    "rescale_timesteps": False,
    "num_classes": '2',
    "dataset": "clevr_pos",
    "use_fp16": has_cuda,
    "timestep_respacing": '100'
}
for key, val in flags.items():
    clevr_options[key] = val
clevr_model, clevr_diffusion = create_model_and_diffusion_for_clevr(**clevr_options)
clevr_model.eval()
if has_cuda:
    clevr_model.convert_to_fp16()
clevr_model.to(device)
clevr_model.load_state_dict(th.load(download_model('clevr_pos'), device))
print('total clevr_pos parameters', sum(x.numel() for x in clevr_model.parameters()))
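# compose_clevr_objects implements composable diffusion for the CLEVR
# position model: each "x, y" coordinate in the prompt is scored as a
# separate condition, the conditional scores are averaged, and
# classifier-free guidance is applied against the unconditional score.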
def compose_clevr_objects(prompt, guidance_scale, steps):
    # parse "x, y | x, y | ..." into a list of 2D coordinates
    coordinates = [[float(x.split(',')[0].strip()), float(x.split(',')[1].strip())]
                   for x in prompt.split('|')]
    coordinates += [[-1, -1]]  # add unconditional score label
    batch_size = 1
    clevr_options['timestep_respacing'] = str(int(steps))
    _, clevr_diffusion = create_model_and_diffusion_for_clevr(**clevr_options)

    def model_fn(x_t, ts, **kwargs):
        # replicate the single sample so every condition is scored in one batch
        half = x_t[:1]
        combined = th.cat([half] * kwargs['y'].size(0), dim=0)
        model_out = clevr_model(combined, ts, **kwargs)
        eps, rest = model_out[:, :3], model_out[:, 3:]
        masks = kwargs.get('masks')
        # average the conditional scores, then apply classifier-free guidance
        # against the unconditional score
        cond_eps = eps[masks].mean(dim=0, keepdim=True)
        uncond_eps = eps[~masks].mean(dim=0, keepdim=True)
        half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
        eps = th.cat([half_eps] * x_t.size(0), dim=0)
        return th.cat([eps, rest], dim=1)

    def sample(coordinates):
        # the last label is the unconditional one
        masks = [True] * (len(coordinates) - 1) + [False]
        model_kwargs = dict(
            y=th.tensor(coordinates, dtype=th.float, device=device),
            masks=th.tensor(masks, dtype=th.bool, device=device)
        )
        samples = clevr_diffusion.p_sample_loop(
            model_fn,
            (len(coordinates), 3, clevr_options["image_size"], clevr_options["image_size"]),
            device=device,
            clip_denoised=True,
            progress=True,
            model_kwargs=model_kwargs,
            cond_fn=None,
        )[:batch_size]
        return samples

    samples = sample(coordinates)
    # map from [-1, 1] to a uint8 HWC image
    out_img = samples[0].permute(1, 2, 0)
    out_img = (out_img + 1) / 2
    out_img = (out_img.detach().cpu() * 255.).to(th.uint8)
    out_img = out_img.numpy()
    return out_img
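# Example (hypothetical coordinates), placing two objects on a horizontal line:
# compose_clevr_objects('0.2, 0.5 | 0.8, 0.5', guidance_scale=10, steps=100)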
def stable_diffusion_compose(prompt, scale, steps):
    with autocast('cuda' if has_cuda else 'cpu'):
        # newer diffusers releases return the generated images via `.images`
        # (older releases exposed them as result["sample"])
        image = pipe(prompt, guidance_scale=scale, num_inference_steps=steps).images[0]
    return image
def compose(prompt, version, guidance_scale, steps):
    try:
        with th.no_grad():
            if version == 'Stable_Diffusion_1v_4':
                return stable_diffusion_compose(prompt, guidance_scale, steps)
            else:
                return compose_clevr_objects(prompt, guidance_scale, steps)
    except Exception as e:
        print(e)
        return None
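# Example: compose('a camel | a forest', 'Stable_Diffusion_1v_4', 15, 50)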
examples_1 = 'a camel | a forest'
examples_2 = 'A blue sky | A mountain in the horizon | Cherry Blossoms in front of the mountain'
examples_3 = '0.1, 0.5 | 0.3, 0.5 | 0.5, 0.5 | 0.7, 0.5 | 0.9, 0.5'
examples_4 = 'a blue house | a desert'
examples_5 = 'a white church | lightning in the background'
examples_6 = 'a camel | arctic'
examples_7 = 'A lake | A mountain | Cherry Blossoms next to the lake'
examples = [
    [examples_7, 'Stable_Diffusion_1v_4', 15, 50],
    [examples_5, 'Stable_Diffusion_1v_4', 15, 50],
    [examples_4, 'Stable_Diffusion_1v_4', 15, 50],
    [examples_6, 'Stable_Diffusion_1v_4', 15, 50],
    [examples_3, 'CLEVR Objects', 10, 100]
]
title = 'Compositional Visual Generation with Composable Diffusion Models'
description = '<p>Demo for Composable Diffusion<ul><li>~30s per Stable Diffusion example</li><li>~10s per CLEVR Objects example</li><li>(<b>Note</b>: runtime varies depending on the GPU used.)</li></ul></p><p>See our <a href="https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/">Project Page</a> for more information.</p><ul><li>One version is based on the released <a href="https://github.com/openai/glide-text2im">GLIDE</a> and <a href="https://github.com/CompVis/stable-diffusion/">Stable Diffusion</a> for composing natural language descriptions.</li><li>Another is based on our pre-trained CLEVR Objects model for composing objects.<br>(<b>Note</b>: we recommend keeping <b><i>x</i></b> in the range <b><i>[0.1, 0.9]</i></b> and <b><i>y</i></b> in the range <b><i>[0.25, 0.7]</i></b>, since the training labels fall within these ranges.)</li></ul><p>When composing multiple sentences, use `|` as the delimiter, as in the examples below.</p><p><b>Note: when using Stable Diffusion, black images are returned if the given prompt is detected as problematic. For composing with the GLIDE model, we recommend the Colab demo on our <a href="https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/">Project Page</a>.</b></p>'
iface = gr.Interface(
    compose,
    inputs=[
        "text",
        gr.Radio(['Stable_Diffusion_1v_4', 'CLEVR Objects'], type="value", label='version'),
        gr.Slider(2, 30, label='guidance scale'),
        gr.Slider(10, 200, label='steps')
    ],
    outputs='image', cache_examples=False,
    title=title, description=description, examples=examples)
iface.launch()