import gradio as gr
import torch
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
from diffusers import StableDiffusionInpaintPipeline
from PIL import Image, ImageOps
import PIL
# Compute device: change to "cuda" to run on GPU.
device_name = "cpu"
device = torch.device(device_name)
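
# Load the CLIPSeg processor/model (text-prompted segmentation) and the
# Stable Diffusion 2 inpainting pipeline onto the selected device.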
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined").to(device)
inpainting_pipeline = StableDiffusionInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-2-inpainting").to(device)
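
# Convert a batch of float images in [0, 1] (NumPy) to PIL images;
# single-channel arrays become grayscale ("L") images.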
def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
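
# Run CLIPSeg on a (text, image) pair and turn the sigmoid of its logits
# into a PIL mask resized to the original image.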
def get_mask(text, image):
    inputs = processor(
        text=[text], images=[image], padding="max_length", return_tensors="pt"
    ).to(device)
    with torch.no_grad():  # inference only, no gradients needed
        outputs = model(**inputs)
    mask = torch.sigmoid(outputs.logits).cpu().unsqueeze(-1).numpy()
    mask_pil = numpy_to_pil(mask)[0].resize(image.size)
    return mask_pil
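
# Inpaint everything except the detected object: the CLIPSeg mask is
# inverted so the object is kept and the background is regenerated.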
def predict(prompt, negative_prompt, image, obj2mask, generator=None):
    mask = get_mask(obj2mask, image)
    image = image.convert("RGB").resize((512, 512))
    mask_image = mask.convert("RGB").resize((512, 512))
    # Invert: the pipeline repaints white regions, so the object stays intact.
    mask_image = ImageOps.invert(mask_image)
    images = inpainting_pipeline(prompt=prompt, negative_prompt=negative_prompt,
                                 image=image, mask_image=mask_image,
                                 generator=generator).images
    # Composite the original object back over the generated background.
    mask = mask_image.convert("L")
    return PIL.Image.composite(images[0], image, mask)
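
# Gradio callback: seed a generator for reproducible output, convert the
# input array to PIL, and run the masked inpainting.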
def inference(prompt, negative_prompt, obj2mask, image_numpy):
    # Fixed seed so repeated runs are reproducible.
    generator = torch.Generator(device=device_name).manual_seed(52362)
    # gr.Image() yields a uint8 HxWxC array, so build the PIL image directly
    # (numpy_to_pil expects floats in [0, 1]).
    image = Image.fromarray(image_numpy).convert("RGB").resize((512, 512))
    return predict(prompt, negative_prompt, image, obj2mask, generator=generator)
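
# UI: prompt, negative prompt, and mask query plus the input image on the
# left; the generated image on the right.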
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", value="cinematic, landscape, sharp focus")
            negative_prompt = gr.Textbox(label="Negative Prompt", value="illustration, 3d render")
            mask = gr.Textbox(label="Mask", value="shoe")
            input_img = gr.Image()
            run = gr.Button(value="Generate")
        with gr.Column():
            output_img = gr.Image()
    run.click(
        inference,
        inputs=[prompt, negative_prompt, mask, input_img],
        outputs=output_img,
    )

demo.queue(concurrency_count=1)
demo.launch()