new

- run_local.py  (+2 -2)
- run_pix2pix0.py  (+62 -0)

run_local.py  CHANGED
@@ -15,9 +15,9 @@ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 pipe = pipe.to("cuda")
 
 prompt = "a highly realistic photo of green turtle"
-
+
 generator = torch.Generator(device="cuda").manual_seed(0)
-image = pipe(prompt, generator=generator, num_inference_steps=
+image = pipe(prompt, generator=generator, num_inference_steps=15).images[0]
 print("Time", time.time() - start_time)
 
 path = "/home/patrick_huggingface_co/images/aa.png"
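For context, below is a minimal sketch of what run_local.py plausibly looks like around this hunk. Only the lines shown in the diff are from the commit; the imports, the model id, and the final image.save call are assumptions filled in from the standard diffusers text-to-image pattern.

#!/usr/bin/env python3
import time

import torch
from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler

start_time = time.time()

# The model id is an assumption; the hunk context only shows the scheduler swap onward.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
# Hunk context: swap in the UniPC multistep scheduler, a solver built for few-step sampling.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

prompt = "a highly realistic photo of green turtle"

generator = torch.Generator(device="cuda").manual_seed(0)
# The commit's actual change: run 15 inference steps and index .images[0] on the output.
image = pipe(prompt, generator=generator, num_inference_steps=15).images[0]
print("Time", time.time() - start_time)

path = "/home/patrick_huggingface_co/images/aa.png"
image.save(path)  # assumed; the visible hunk ends at the path assignment

Since UniPC is designed to sample well in few steps, pinning num_inference_steps to 15 is consistent with the start_time/print pair timing a low-step run.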
run_pix2pix0.py  ADDED
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+from huggingface_hub import HfApi
+import torch
+
+import requests
+from PIL import Image
+
+from diffusers import DDIMScheduler, StableDiffusionPix2PixZeroPipeline
+from diffusers.schedulers.scheduling_ddim_inverse import DDIMInverseScheduler
+from transformers import BlipForConditionalGeneration, BlipProcessor
+
+api = HfApi()
+img_url = "https://github.com/pix2pixzero/pix2pix-zero/raw/main/assets/test_images/cats/cat_6.png"
+raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB').resize((512, 512))
+
+processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float16, low_cpu_mem_usage=True)
+
+model_ckpt = "CompVis/stable-diffusion-v1-4"
+pipeline = StableDiffusionPix2PixZeroPipeline.from_pretrained(
+    model_ckpt, caption_generator=model, caption_processor=processor, torch_dtype=torch.float16, safety_checker=None,
+)
+pipeline.enable_model_cpu_offload()
+
+caption = pipeline.generate_caption(raw_image)
+
+pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
+pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)
+
+print(caption)
+
+generator = torch.manual_seed(0)
+inv_latents = pipeline.invert(caption, image=raw_image, generator=generator).latents
+
+source_prompts = 4 * ["a cat sitting on the street", "a cat playing in the field", "a face of a cat"]
+target_prompts = 4 * ["a dog sitting on the street", "a dog playing in the field", "a face of a dog"]
+
+source_embeds = pipeline.get_embeds(source_prompts, batch_size=2)
+target_embeds = pipeline.get_embeds(target_prompts, batch_size=2)
+
+
+image = pipeline(
+    caption,
+    source_embeds=source_embeds,
+    target_embeds=target_embeds,
+    num_inference_steps=50,
+    cross_attention_guidance_amount=0.15,
+    generator=generator,
+    latents=inv_latents,
+    negative_prompt=caption,
+).images[0]
+
+path = "/home/patrick_huggingface_co/images/aa.png"
+image.save(path)
+
+api.upload_file(
+    path_or_fileobj=path,
+    path_in_repo=path.split("/")[-1],
+    repo_id="patrickvonplaten/images",
+    repo_type="dataset",
+)
+print("https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa.png")
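The new script runs the pix2pix-zero recipe end to end: BLIP captions the downloaded cat photo, pipeline.invert performs DDIM inversion to recover noise latents for that image, get_embeds encodes the source ("cat") and target ("dog") prompt groups from which the pipeline derives the edit direction, and the final pipeline call resamples from the inverted latents while cross_attention_guidance_amount=0.15 steers cross-attention from the source toward the target concept. The same pipeline is also documented to work without the inversion step, editing a purely synthetic image instead. A minimal sketch of that variant; the prompt text, seed, and output filename are illustrative, not from the commit:

import torch
from diffusers import StableDiffusionPix2PixZeroPipeline

pipeline = StableDiffusionPix2PixZeroPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

prompt = "a high resolution painting of a cat"  # illustrative prompt
source_embeds = pipeline.get_embeds(["a cat"], batch_size=1)
target_embeds = pipeline.get_embeds(["a dog"], batch_size=1)

# With no latents= from an inversion step, the pipeline synthesizes the image
# from the prompt and applies the cat-to-dog edit during sampling.
image = pipeline(
    prompt,
    source_embeds=source_embeds,
    target_embeds=target_embeds,
    num_inference_steps=50,
    cross_attention_guidance_amount=0.15,
    generator=torch.Generator("cuda").manual_seed(0),
).images[0]
image.save("synthetic_edit.png")

Dropping latents= means the edit applies to a freshly generated cat rather than the downloaded photograph, which is useful for sanity-checking the prompt embeddings before paying for the inversion pass.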