poipiii committed
Commit b00902f • 1 parent: 0efbf9b
Commit message: "test in latnent upcale"
Files changed: pipeline.py (+10 −6)
pipeline.py CHANGED

@@ -811,12 +811,16 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
 
         # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-        print("before denoise latents")
+        # print("before denoise latents")
         print(latents.shape)
         # 8. Denoising loop
         for i, t in enumerate(self.progress_bar(timesteps)):
             # expand the latents if we are doing classifier free guidance
             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+            print("latent_model_input 1st step")
+            # print(latent_model_input)
+            print(latent_model_input.shape)
+
             latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
 
             # predict the noise residual
@@ -842,7 +846,7 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
             if is_cancelled_callback is not None and is_cancelled_callback():
                 return None
         print("after first step denoise latents")
-        print(latents)
+        # print(latents)
         print(latents.shape)
         latents = torch.nn.functional.interpolate(
             latents, size=(int(height*resize_scale)//8, int(width*resize_scale)//8))
@@ -851,8 +855,8 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
         # expand the latents if we are doing classifier free guidance
         latent_model_input = torch.cat(
             [latents] * 2) if do_classifier_free_guidance else latents
-        print("latent_model_input")
-        print(latent_model_input)
+        print("latent_model_input 2nd step")
+        # print(latent_model_input)
         print(latent_model_input.shape)
 
         print("2nd step timestep")
@@ -861,13 +865,13 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
         latent_model_input = self.scheduler.scale_model_input(
            latent_model_input, t)
         print("latent_model_input after scheduler")
-        print(latent_model_input)
+        # print(latent_model_input)
         print(latent_model_input.shape)
         # predict the noise residual
         noise_pred = self.unet(latent_model_input, t,
                                encoder_hidden_states=text_embeddings).sample
         print("noise_pred")
-        print(noise_pred)
+        # print(noise_pred)
         print(noise_pred.shape)
         # perform guidance
         if do_classifier_free_guidance:
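For context, a minimal, self-contained sketch of the latent-upscale step this commit is instrumenting, not the pipeline's own code: after the first denoising pass the latent tensor itself is resized with torch.nn.functional.interpolate and denoising then continues on the larger latent grid. The concrete height, width, and resize_scale values and the 4-channel latent layout below are assumptions for illustration.

import torch
import torch.nn.functional as F

# Assumed values, for illustration only.
height, width = 512, 512   # assumed target image resolution
resize_scale = 1.5         # assumed mid-denoising upscale factor
latents = torch.randn(1, 4, height // 8, width // 8)  # SD latents live at 1/8 resolution

# Resize the latents the same way the diff above does, so the UNet keeps
# denoising on a larger latent grid (here 64x64 -> 96x96).
latents = F.interpolate(
    latents,
    size=(int(height * resize_scale) // 8, int(width * resize_scale) // 8),
)
print(latents.shape)  # torch.Size([1, 4, 96, 96]) for the values above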