AP123 committed
Commit b40804f
Parent: 3d77d68

Update app.py

Files changed (1)
  1. app.py +28 -23
app.py CHANGED
@@ -4,17 +4,33 @@ from PIL import Image
 from diffusers import AutoPipelineForText2Image, DDIMScheduler
 from transformers import CLIPVisionModelWithProjection
 import numpy as np
-import spaces  # Ensure this is available in your environment
 
-# Initialize a zero tensor for demonstration purposes
-zero = torch.Tensor([0]).cuda()
-print(zero.device)  # Should output 'cuda:0' if a GPU is available
+# Initialize the pipeline with GPU support
+pipeline = AutoPipelineForText2Image.from_pretrained(
+    "stabilityai/stable-diffusion-xl-base-1.0",
+    torch_dtype=torch.float16,
+    device="cuda",  # Use GPU device if available
+)
+
+# Configure the scheduler for the pipeline
+pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
+
+# Load IP adapter with specified weights and set the scale for each component
+pipeline.load_ip_adapter(
+    "h94/IP-Adapter",
+    subfolder="sdxl_models",
+    weight_name=[
+        "ip-adapter-plus_sdxl_vit-h.safetensors",
+        "ip-adapter-plus-face_sdxl_vit-h.safetensors"
+    ]
+)
+pipeline.set_ip_adapter_scale([0.7, 0.5])
+
+# Ensure the model and its components are moved to GPU
+pipeline.to("cuda")
 
-@spaces.GPU  # Decorate the function to run on GPU
 def transform_image(face_image):
-    print(zero.device)  # Check the device inside the function, should be 'cuda:0'
-
-    generator = torch.Generator(device="cuda").manual_seed(0)  # Use GPU device if available
+    generator = torch.Generator(device="cuda").manual_seed(0)
 
     # Process the input face image
     if isinstance(face_image, Image.Image):
@@ -25,10 +41,10 @@ def transform_image(face_image):
         raise ValueError("Unsupported image format")
 
     # Load the style image from the local path
-    style_image_path = "/content/soyjak2.jpeg"
+    style_image_path = "InstaSoyjak/soyjak2.jpeg"
     style_image = Image.open(style_image_path)
 
-    # Perform the transformation using the GPU
+    # Perform the transformation using the configured pipeline
     image = pipeline(
         prompt="soyjak",
         ip_adapter_image=[style_image, processed_face_image],
@@ -39,25 +55,14 @@ def transform_image(face_image):
 
     return image
 
-# Load models and configure pipeline with GPU support
-pipeline = AutoPipelineForText2Image.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-1.0",
-    torch_dtype=torch.float16,  # Consider using torch.float32 for GPU computations
-    device="cuda",  # Use GPU device if available
-).to("cuda")  # Ensure the model is moved to GPU
-
-# Additional pipeline configurations
-pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config).to("cuda")
-pipeline.enable_model_cpu_offload(False)  # Consider not offloading to CPU when using GPU
-
 # Gradio interface setup
 demo = gr.Interface(
     fn=transform_image,
     inputs=gr.Image(label="Upload your face image"),
     outputs=gr.Image(label="Your Soyjak"),
     title="InstaSoyjak - turn anyone into a Soyjak",
-    description="All you need to do is upload an image. Please use responsibly. Please follow me on Twitter if you like this space: https://twitter.com/angrypenguinPNG. Idea from Yacine, please give him a follow: https://twitter.com/yacineMTB.",
+    description="All you need to do is upload an image. Please use responsibly.",
 )
 
-demo.queue(max_size=20)  # Configures the queue with a maximum size of 20
+demo.queue(max_size=20)
 demo.launch()
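
For context, the new module-level setup follows the multi-IP-Adapter pattern in diffusers: two adapter weight files are loaded from h94/IP-Adapter, and set_ip_adapter_scale([0.7, 0.5]) assigns strengths in the same order as the images later passed via ip_adapter_image=[style_image, processed_face_image]. Below is a minimal, self-contained sketch of that pattern, not the committed app.py: it assumes the ViT-H "plus" weights need the CLIP image encoder passed in explicitly (which would explain the CLIPVisionModelWithProjection import), drops the device= keyword from from_pretrained in favour of .to("cuda"), and uses hypothetical local image paths.

import torch
from PIL import Image
from diffusers import AutoPipelineForText2Image, DDIMScheduler
from transformers import CLIPVisionModelWithProjection

# Assumption: the "plus" ViT-H adapters expect an explicit CLIP image encoder.
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "h94/IP-Adapter",
    subfolder="models/image_encoder",
    torch_dtype=torch.float16,
)

# SDXL base in float16, moved to the GPU via .to("cuda").
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    image_encoder=image_encoder,
    torch_dtype=torch.float16,
).to("cuda")
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)

# Two adapters: the first conditions on overall style, the second on a face.
pipeline.load_ip_adapter(
    "h94/IP-Adapter",
    subfolder="sdxl_models",
    weight_name=[
        "ip-adapter-plus_sdxl_vit-h.safetensors",
        "ip-adapter-plus-face_sdxl_vit-h.safetensors",
    ],
)
# Scales are applied in the same order as the images passed at call time.
pipeline.set_ip_adapter_scale([0.7, 0.5])

style_image = Image.open("soyjak2.jpeg")  # hypothetical local paths
face_image = Image.open("face.jpeg")

image = pipeline(
    prompt="soyjak",
    ip_adapter_image=[style_image, face_image],  # order matches the scales above
    num_inference_steps=30,
    generator=torch.Generator(device="cuda").manual_seed(0),
).images[0]
image.save("soyjak_result.png")

The ordering is the design point to keep in mind: swapping the entries in either list swaps which image is treated as the style reference and which as the face reference, and at which strength.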