import gradio as gr
import torch
from PIL import Image
from diffusers import AutoPipelineForText2Image, DDIMScheduler
from transformers import CLIPVisionModelWithProjection
import numpy as np

# Load the ViT-H CLIP image encoder required by the "vit-h" IP-Adapter weights;
# the SDXL base pipeline does not bundle an image encoder of its own
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "h94/IP-Adapter",
    subfolder="models/image_encoder",
    torch_dtype=torch.float16,
)

# Initialize the pipeline; it is moved to the GPU with .to("cuda") below
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    image_encoder=image_encoder,
    torch_dtype=torch.float16,
)

# Configure the scheduler for the pipeline
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)

# Load two IP-Adapters: a general "plus" adapter that conditions on the style
# image and a face-focused adapter that conditions on the uploaded face image
pipeline.load_ip_adapter(
    "h94/IP-Adapter",
    subfolder="sdxl_models",
    weight_name=[
        "ip-adapter-plus_sdxl_vit-h.safetensors",
        "ip-adapter-plus-face_sdxl_vit-h.safetensors",
    ],
)
# One scale per adapter, in the order the weights were loaded:
# 0.7 for the style adapter, 0.5 for the face adapter
pipeline.set_ip_adapter_scale([0.7, 0.5])

# Ensure the model and its components are moved to GPU
pipeline.to("cuda")

def transform_image(face_image):
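    """Apply the soyjak style to the uploaded face image and return the generated image."""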
    generator = torch.Generator(device="cuda").manual_seed(0)

    # Process the input face image
    if isinstance(face_image, Image.Image):
        processed_face_image = face_image
    elif isinstance(face_image, np.ndarray):
        processed_face_image = Image.fromarray(face_image)
    else:
        raise ValueError("Unsupported image format")

    # Load the style image bundled with the app; convert to RGB so the
    # image encoder always receives a 3-channel image
    style_image_path = "InstaSoyjak/soyjak2.jpeg"
    style_image = Image.open(style_image_path).convert("RGB")

    # Perform the transformation using the configured pipeline
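    # The ip_adapter_image list must follow the order in which the adapters were
    # loaded: the style image feeds the "plus" adapter, the processed face image
    # feeds the "plus-face" adapter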
    image = pipeline(
        prompt="soyjak",
        ip_adapter_image=[style_image, processed_face_image],
        negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
        num_inference_steps=30,
        generator=generator,
    ).images[0]

    return image

# Gradio interface setup
demo = gr.Interface(
    fn=transform_image,
    inputs=gr.Image(label="Upload your face image"),
    outputs=gr.Image(label="Your Soyjak"),
    title="InstaSoyjak - turn anyone into a Soyjak",
    description="All you need to do is upload an image. Please use responsibly.",
)

demo.queue(max_size=20)
demo.launch()