Fix (#1)
Commit: 28ed6816a78dd70c0548e027d85e8a503b9e688b
Co-authored-by: hysts <hysts@users.noreply.huggingface.co>
app.py CHANGED
@@ -4,7 +4,7 @@ from PIL import Image
|
|
4 |
from diffusers import AutoPipelineForText2Image, DDIMScheduler
|
5 |
from transformers import CLIPVisionModelWithProjection
|
6 |
import numpy as np
|
7 |
-
import spaces
|
8 |
|
9 |
|
10 |
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
|
@@ -25,7 +25,7 @@ pipeline.set_ip_adapter_scale([0.7, 0.5])
|
|
25 |
|
26 |
pipeline.enable_model_cpu_offload()
|
27 |
|
28 |
-
@spaces.
|
29 |
def transform_image(face_image):
|
30 |
generator = torch.Generator(device="cpu").manual_seed(0)
|
31 |
|
@@ -39,7 +39,7 @@ def transform_image(face_image):
|
|
39 |
raise ValueError("Unsupported image format")
|
40 |
|
41 |
# Load the style image from the local path
|
42 |
-
style_image_path = "/
|
43 |
style_image = Image.open(style_image_path)
|
44 |
|
45 |
# Perform the transformation
|
@@ -63,4 +63,4 @@ demo = gr.Interface(
|
|
63 |
)
|
64 |
|
65 |
demo.queue(max_size=20) # Configures the queue with a maximum size of 20
|
66 |
-
demo.launch()
|
|
|
4 |
from diffusers import AutoPipelineForText2Image, DDIMScheduler
|
5 |
from transformers import CLIPVisionModelWithProjection
|
6 |
import numpy as np
|
7 |
+
import spaces
|
8 |
|
9 |
|
10 |
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
|
|
|
25 |
|
26 |
pipeline.enable_model_cpu_offload()
|
27 |
|
28 |
+
@spaces.GPU
|
29 |
def transform_image(face_image):
|
30 |
generator = torch.Generator(device="cpu").manual_seed(0)
|
31 |
|
|
|
39 |
raise ValueError("Unsupported image format")
|
40 |
|
41 |
# Load the style image from the local path
|
42 |
+
style_image_path = "examples/soyjak2.jpg"
|
43 |
style_image = Image.open(style_image_path)
|
44 |
|
45 |
# Perform the transformation
|
|
|
63 |
)
|
64 |
|
65 |
demo.queue(max_size=20) # Configures the queue with a maximum size of 20
|
66 |
+
demo.launch()
|
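For context, the corrected decorator follows the usual Hugging Face ZeroGPU pattern: `import spaces` at module level and wrap the GPU-bound function with `@spaces.GPU`, so a device is attached only while that call runs. The sketch below only illustrates that pattern; the model id, function name, and prompt handling are placeholders, not this Space's actual code, which builds its pipeline with an IP-Adapter and a custom CLIP image encoder.

```python
import spaces
import torch
from diffusers import AutoPipelineForText2Image

# Placeholder model id -- the real Space also attaches an IP-Adapter and a
# CLIPVisionModelWithProjection image encoder before enabling CPU offload.
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipeline.enable_model_cpu_offload()


@spaces.GPU  # ZeroGPU attaches a GPU only for the duration of this call
def generate(prompt: str):
    # Fixed CPU-side seed for reproducible output, mirroring the Space's setup
    generator = torch.Generator(device="cpu").manual_seed(0)
    return pipeline(prompt=prompt, generator=generator).images[0]
```

The other change points `style_image_path` at `examples/soyjak2.jpg`, presumably a file committed alongside `app.py`, so the style reference resolves relative to the app's working directory instead of the path beginning with "/" that it replaced.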