import pickle

import gradio as gr
import numpy as np
import torch
from diffusers import LDMSuperResolutionPipeline, StableDiffusionUpscalePipeline
from huggingface_hub import hf_hub_download
from PIL import Image

# Download the pretrained ConceptGAN generator and load its EMA weights.
model_path = hf_hub_download(repo_id="Shaoan/ConceptGAN", filename="augceleba_8064.pkl")
with open(model_path, 'rb') as f:
    G = pickle.load(f)['G_ema'].cpu().float()  # torch.nn.Module

# Names of the 12 learned concept dimensions; the order below matches the
# layout of the first 12 entries of the conditioning vector.
cchoices = [
    'Big Nose',
    'Black Hair',
    'Blond Hair',
    'Chubby',
    'Eyeglasses',
    'Male',
    'Pale Skin',
    'Smiling',
    'Straight Hair',
    'Wavy Hair',
    'Wearing Hat',
    'Young',
]

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the latent-diffusion super-resolution model and scheduler.
model_id = "CompVis/ldm-super-resolution-4x-openimages"
pipeline = LDMSuperResolutionPipeline.from_pretrained(model_id)
pipeline = pipeline.to(device)

# Alternative text-guided upscaler, kept only for the commented-out path in
# super_res() below.
model_id = "stabilityai/stable-diffusion-x4-upscaler"
text_pipeline = StableDiffusionUpscalePipeline.from_pretrained(
    model_id, torch_dtype=torch.float32
)


def super_res(low_res_img, num_steps):
    # Run the diffusion pipeline in inference mode (sample random noise and
    # denoise); the input image is expected in [-1, 1].
    upscaled_image = pipeline(low_res_img, num_inference_steps=num_steps, eta=1).images[0]
    #upscaled_image = text_pipeline(prompt="a sharp image of human face", image=low_res_img, num_inference_steps=75).images[0]
    return upscaled_image


@torch.no_grad()
def generate(seed, upscale, upscale_steps, *checkboxes):
    # Sample a latent deterministically from the seed (gr.Number delivers a
    # float, so cast it before seeding the generator).
    z = torch.randn([1, G.z_dim], generator=torch.Generator().manual_seed(int(seed)))

    # Build the 20-dim conditioning vector: the first 12 entries follow
    # `cchoices`; dims 12-19 are extra dimensions for mustache, goatee, and
    # baldness. A few concepts are scaled by 2 to strengthen their effect.
    checkboxes_vector = torch.zeros([20])
    slider_to_concept = {
        1: ('Black Hair', 1),
        2: ('Blond Hair', 1),
        3: ('Straight Hair', 1),
        4: ('Wavy Hair', 1),
        5: ('Young', 2),
        6: ('Male', 1),
        9: ('Big Nose', 1),
        10: ('Chubby', 1),
        11: ('Eyeglasses', 2),
        12: ('Pale Skin', 1),
        13: ('Smiling', 1),
        14: ('Wearing Hat', 2),
    }
    for i, (name, scale) in slider_to_concept.items():
        checkboxes_vector[cchoices.index(name)] = checkboxes[i] * scale

    is_bald = checkboxes[0]
    is_goatee = checkboxes[7]
    is_mustache = checkboxes[8]
    checkboxes_vector[12] = is_mustache * 2
    checkboxes_vector[13] = is_mustache * 2
    checkboxes_vector[14] = is_goatee * 2
    checkboxes_vector[15] = is_goatee * 2
    checkboxes_vector[16] = is_bald
    checkboxes_vector[17] = is_bald
    checkboxes_vector[18] = is_bald
    checkboxes_vector[19] = is_bald
    print(checkboxes_vector)  # debug: log the conditioning vector
    m = checkboxes_vector.view(1, 20)

    # Map latent + condition through the mapping network, synthesize, and
    # rescale the output from [-1, 1] to [0, 1].
    ws = G.mapping(z, m, truncation_psi=0.5)
    img = (G.synthesis(ws, force_fp32=True).clip(-1, 1) + 1) / 2
    if upscale:
        # The upscaler expects [-1, 1] and returns a PIL image.
        up_img = np.array(super_res(img * 2 - 1, upscale_steps))
        return up_img
    else:
        return img[0].permute(1, 2, 0).numpy()
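
# For reference, a minimal sketch of driving `generate` outside the UI.
# This helper is not part of the original demo: the name `save_preview` and
# its defaults are illustrative. It assumes the argument order used in the
# Gradio wiring below (seed, upscale flag, upscale steps, then the fifteen
# slider values in layout order).
def save_preview(path="preview.png", seed=6, upscale=False, upscale_steps=10):
    """Render one face with every attribute slider at 0 and save it to disk."""
    arr = generate(seed, upscale, upscale_steps, *([0.0] * 15))
    if arr.dtype != np.uint8:  # the non-upscaled branch returns floats in [0, 1]
        arr = (arr * 255).round().astype(np.uint8)
    Image.fromarray(arr).save(path)
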
# Create the interface using gr.Blocks
with gr.Blocks() as demo:
    with gr.Row():
        slider1 = gr.Slider(label='Not Bald <--------------> Bald', minimum=0, maximum=1, step=0.01)
        slider2 = gr.Slider(label='No Black Hair <--------> Black Hair', minimum=0, maximum=1, step=0.01)
        slider3 = gr.Slider(label='No Blond Hair <--------> Blond Hair', minimum=0, maximum=1, step=0.01)
        slider4 = gr.Slider(label='No Straight Hair <-----> Straight Hair', minimum=0, maximum=1, step=0.01)
        slider5 = gr.Slider(label='No Wavy Hair <-------> Wavy Hair', minimum=0, maximum=1, step=0.01)
    sliders = [slider1, slider2, slider3, slider4, slider5]

    with gr.Row():
        sliders += [gr.Slider(label='Old <--------------> Young', minimum=0, maximum=1, step=0.01)]
        sliders += [gr.Slider(label='Female <--------------> Male', minimum=0, maximum=1, step=0.01)]

    with gr.Row():
        sliders += [gr.Slider(label='No Goatee <--------------> Goatee', minimum=0, maximum=1, step=0.01)]
        sliders += [gr.Slider(label='No Mustache <--------------> Mustache', minimum=0, maximum=1, step=0.01)]

    with gr.Row():
        sliders += [
            gr.Slider(label='Small Nose <-------> Big Nose', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='Slim <--------> Chubby', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='No Eyeglasses <--------> Eyeglasses', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='Tan Skin <-------> Pale Skin', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='Not Smiling <---------> Smiling', minimum=0, maximum=1, step=0.01),
            gr.Slider(label='No Hat <---------> Wearing Hat', minimum=0, maximum=1, step=0.01),
        ]

    seed_input = gr.Number(label="Seed", value=6)
    with gr.Row():
        upscale_funcs = [gr.Checkbox(label="Upscale 4x")]
        upscale_funcs += [gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=10)]

    generate_button = gr.Button("Generate")
    output_image = gr.Image(label="Generated Image")

    # Re-render whenever any slider moves
    for slider in sliders:
        slider.change(fn=generate, inputs=[seed_input] + upscale_funcs + sliders, outputs=output_image)

    # Set the action for the button
    generate_button.click(fn=generate, inputs=[seed_input] + upscale_funcs + sliders, outputs=output_image)

# Launch the demo
demo.launch()
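
# Note: `demo.launch()` blocks until the server is stopped, so nothing below
# it runs while the app is live. The module-level call is the conventional
# entry point on Hugging Face Spaces; for local testing, standard Gradio
# options such as `demo.launch(share=True)` (temporary public URL) or
# `demo.launch(server_port=7860)` can be used instead. These are part of
# Gradio's public API, not specific to this demo.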