import pickle

import gradio as gr
import numpy as np
import torch
from diffusers import StableDiffusionUpscalePipeline

# Load the pretrained conditional generator (exponential-moving-average weights)
# from the training checkpoint and keep it on CPU in float32.
with open('../concept_checkpoints/augceleba_4838.pkl', 'rb') as f:
    G = pickle.load(f)['G_ema'].cpu().float()  # torch.nn.Module
# Dropdown options for edit granularity (defined but not currently wired into the UI).
model_choices = [
    'Change Dim = 8',
    'Change Dim = 15',
    'Change Dim = 30',
    'Change Dim = 60',
]
# Concept labels, in the order expected by the generator's conditioning vector.
cchoices = [
    'Big Nose',
    'Black Hair',
    'Blond Hair',
    'Chubby',
    'Eyeglasses',
    'Male',
    'Pale Skin',
    'Smiling',
    'Straight Hair',
    'Wavy Hair',
    'Wearing Hat',
    'Young',
]
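# Note (inferred from generate() below): dims 0-11 of the 20-dim conditioning
# vector follow the cchoices order above, while dims 12-13 appear to drive
# mustache, 14-15 goatee, and 16-19 baldness. The exact semantics of the extra
# dims depend on how the checkpoint was trained.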
device = "cuda" if torch.cuda.is_available() else "cpu"

# 4x super-resolution pipeline used to upscale the generator's output.
# (An LDM alternative, "CompVis/ldm-super-resolution-4x-openimages" via
# LDMSuperResolutionPipeline, was tried previously; see the commented-out
# call in super_res below.)
model_id = "stabilityai/stable-diffusion-x4-upscaler"
pipeline = StableDiffusionUpscalePipeline.from_pretrained(
    model_id, torch_dtype=torch.float32
)
pipeline = pipeline.to(device)
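# Note: on a GPU this pipeline is commonly loaded with torch_dtype=torch.float16
# (and variant="fp16") to roughly halve memory use; float32 is kept here so the
# Space also runs on CPU.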
def super_res(low_res_img):
    # Run the diffusion pipeline in inference mode (sample random noise and
    # denoise), guided by a generic face prompt.
    # LDM alternative: pipeline(low_res_img, num_inference_steps=10, eta=1).images[0]
    upscaled_image = pipeline(prompt="a sharp image of human face", image=low_res_img, num_inference_steps=10).images[0]
    return upscaled_image
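# super_res accepts whatever the pipeline's `image` argument supports (a PIL
# image or an image tensor batch; NumPy arrays are also accepted by recent
# diffusers versions) and returns a PIL.Image at 4x the input resolution.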
def generate(seed, *checkboxes):
    # Sample a latent deterministically from the seed (gr.Number may pass a
    # float, so cast to int for manual_seed).
    z = torch.randn([1, G.z_dim], generator=torch.Generator().manual_seed(int(seed)))

    # Build the 20-dim conditioning vector. Slider order:
    # 0 Bald, 1 Black Hair, 2 Blond Hair, 3 Straight Hair, 4 Wavy Hair,
    # 5 Young, 6 Male, 7 Goatee, 8 Mustache, 9 Big Nose, 10 Chubby,
    # 11 Eyeglasses, 12 Pale Skin, 13 Smiling, 14 Wearing Hat.
    checkboxes_vector = torch.zeros([20])
    for i in range(len(checkboxes)):
        if i == 1:
            checkboxes_vector[cchoices.index('Black Hair')] = checkboxes[i]
        elif i == 2:
            checkboxes_vector[cchoices.index('Blond Hair')] = checkboxes[i]
        elif i == 3:
            checkboxes_vector[cchoices.index('Straight Hair')] = checkboxes[i]
        elif i == 4:
            checkboxes_vector[cchoices.index('Wavy Hair')] = checkboxes[i]
        elif i == 5:
            checkboxes_vector[cchoices.index('Young')] = checkboxes[i]
        elif i == 6:
            checkboxes_vector[cchoices.index('Male')] = checkboxes[i]
        elif i == 9:
            checkboxes_vector[cchoices.index('Big Nose')] = checkboxes[i]
        elif i == 10:
            checkboxes_vector[cchoices.index('Chubby')] = checkboxes[i]
        elif i == 11:
            checkboxes_vector[cchoices.index('Eyeglasses')] = checkboxes[i]
        elif i == 12:
            checkboxes_vector[cchoices.index('Pale Skin')] = checkboxes[i]
        elif i == 13:
            checkboxes_vector[cchoices.index('Smiling')] = checkboxes[i]
        elif i == 14:
            checkboxes_vector[cchoices.index('Wearing Hat')] = checkboxes[i] * 1.5

    # Bald, goatee, and mustache are handled separately: each drives several
    # of the extra conditioning dims (12-19).
    is_bald = checkboxes[0]
    is_goatee = checkboxes[7]
    is_mustache = checkboxes[8]
    checkboxes_vector[12] = is_mustache * 1.5
    checkboxes_vector[13] = is_mustache * 1.5
    checkboxes_vector[14] = is_goatee * 1.5
    checkboxes_vector[15] = is_goatee * 1.5
    checkboxes_vector[16:20] = is_bald

    m = checkboxes_vector.view(1, 20)
    ws = G.mapping(z, m, truncation_psi=0.5)
    # Map synthesis output from [-1, 1] to [0, 1], then upscale.
    img = (G.synthesis(ws, force_fp32=True).clip(-1, 1) + 1) / 2
    up_img = np.array(super_res(img))
    # Without super-resolution: return img[0].permute(1, 2, 0).numpy()
    return up_img
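# Minimal sanity check (assumes the checkpoint above is available):
#   out = generate(0, *([0.0] * 15))  # seed 0, all 15 sliders at zero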
# Create the interface using gr.Blocks
with gr.Blocks() as demo:
    with gr.Row():
        sliders = [
            gr.Slider(label='Bald', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='Black Hair', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='Blond Hair', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='Straight Hair', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='Wavy Hair', minimum=0, maximum=1, step=0.01),
        ]
    with gr.Row():
        sliders += [gr.Slider(label='Young', minimum=0, maximum=1, step=0.01)]
        sliders += [gr.Slider(label='Male', minimum=0, maximum=1, step=0.01)]
    with gr.Row():
        sliders += [gr.Slider(label='Goatee', minimum=0, maximum=1, step=0.01)]
        sliders += [gr.Slider(label='Mustache', minimum=0, maximum=1, step=0.01)]
    with gr.Row():
        sliders += [
            gr.Slider(label='Big Nose', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='Chubby', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='Eyeglasses', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='Pale Skin', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='Smiling', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='Wearing Hat', minimum=0, maximum=1, step=0.01),
        ]

    seed_input = gr.Number(label="Seed", precision=0)
    generate_button = gr.Button("Generate")
    output_image = gr.Image(label="Generated Image")

    # Wire the button to the generator; slider order must match generate().
    generate_button.click(fn=generate, inputs=[seed_input] + sliders, outputs=output_image)

# Launch the demo
demo.launch()