DecoderWQH666 committed
Commit 7e2c35e
1 Parent(s): 92b6ce4

Update app.py

Files changed (1)
  1. app.py +3 -2
app.py CHANGED
@@ -18,7 +18,7 @@ from utils import text_encoder_forward
 from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
 from utils import latents_to_images, downsampling, merge_and_save_images
 from omegaconf import OmegaConf
-from accelerate.utils import set_seed
+# from accelerate.utils import set_seed
 from tqdm import tqdm
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
 from PIL import Image
@@ -133,7 +133,7 @@ woman_Embedding_Manager = models.embedding_manager.EmbeddingManagerId_adain(
 text_encoder.text_model.embeddings.forward = original_forward
 
 DEFAULT_STYLE_NAME = "Watercolor"
-MAX_SEED = np.iinfo(np.int32).max
+# MAX_SEED = np.iinfo(np.int32).max
 
 
 def replace_phrases(prompt):
@@ -185,6 +185,7 @@ def generate_image(chose_emb, choice, gender_GAN, prompts_array):
     os.makedirs(save_dir, exist_ok=True)
 
     random_embedding = torch.randn(1, 1, input_dim).to(device)
+    print(random_embedding)
     if choice == "Create a new character":
         _, emb_dict = Embedding_Manager(tokenized_text=None, embedded_text=None, name_batch=None, random_embeddings = random_embedding, timesteps = None,)
         test_emb = emb_dict["adained_total_embedding"].to(device)
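For context on the two commented-out lines: accelerate's set_seed and a MAX_SEED constant built from np.iinfo(np.int32).max are commonly paired in diffusers/Gradio demos to optionally draw a random seed and then seed all RNGs at once. A minimal sketch of that typical pattern, not code from this repo (the helper name randomize_and_set_seed is hypothetical):

import random
import numpy as np
from accelerate.utils import set_seed

MAX_SEED = np.iinfo(np.int32).max  # 2147483647, the largest 32-bit signed integer

def randomize_and_set_seed(seed: int, randomize_seed: bool) -> int:
    # Optionally draw a fresh seed, then seed Python/NumPy/PyTorch RNGs in one call.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    set_seed(seed)
    return seed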