linoyts HF staff committed on
Commit
1c78270
1 Parent(s): 261ff69

Update clip_slider_pipeline.py

Browse files
Files changed (1) hide show
  1. clip_slider_pipeline.py +7 -7
clip_slider_pipeline.py CHANGED
@@ -48,9 +48,9 @@ class CLIPSlider:
48
  pos_prompt = f"a {medium} of a {target_word} {subject}"
49
  neg_prompt = f"a {medium} of a {opposite} {subject}"
50
  pos_toks = self.pipe.tokenizer(pos_prompt, return_tensors="pt", padding="max_length", truncation=True,
51
- max_length=self.pipe.tokenizer.model_max_length).input_ids
52
  neg_toks = self.pipe.tokenizer(neg_prompt, return_tensors="pt", padding="max_length", truncation=True,
53
- max_length=self.pipe.tokenizer.model_max_length).input_ids
54
  pos = self.pipe.text_encoder(pos_toks).pooler_output
55
  neg = self.pipe.text_encoder(neg_toks).pooler_output
56
  positives.append(pos)
@@ -82,7 +82,7 @@ class CLIPSlider:
82
 
83
  with torch.no_grad():
84
  toks = self.pipe.tokenizer(prompt, return_tensors="pt", padding="max_length", truncation=True,
85
- max_length=self.pipe.tokenizer.model_max_length).input_ids
86
  prompt_embeds = self.pipe.text_encoder(toks).last_hidden_state
87
 
88
  if avg_diff_2nd and normalize_scales:
@@ -303,18 +303,18 @@ class CLIPSlider3(CLIPSlider):
303
  neg_prompt = f"a {medium} of a {opposite} {subject}"
304
 
305
  pos_toks = self.pipe.tokenizer(pos_prompt, return_tensors="pt", padding="max_length", truncation=True,
306
- max_length=self.pipe.tokenizer.model_max_length).input_ids
307
  neg_toks = self.pipe.tokenizer(neg_prompt, return_tensors="pt", padding="max_length", truncation=True,
308
- max_length=self.pipe.tokenizer.model_max_length).input_ids
309
  pos = self.pipe.text_encoder(pos_toks).text_embeds
310
  neg = self.pipe.text_encoder(neg_toks).text_embeds
311
  positives.append(pos)
312
  negatives.append(neg)
313
 
314
  pos_toks2 = self.pipe.tokenizer_2(pos_prompt, return_tensors="pt", padding="max_length", truncation=True,
315
- max_length=self.pipe.tokenizer_2.model_max_length).input_ids
316
  neg_toks2 = self.pipe.tokenizer_2(neg_prompt, return_tensors="pt", padding="max_length", truncation=True,
317
- max_length=self.pipe.tokenizer_2.model_max_length).input_ids
318
  pos2 = self.pipe.text_encoder_2(pos_toks2).text_embeds
319
  neg2 = self.pipe.text_encoder_2(neg_toks2).text_embeds
320
  positives2.append(pos2)
 
48
  pos_prompt = f"a {medium} of a {target_word} {subject}"
49
  neg_prompt = f"a {medium} of a {opposite} {subject}"
50
  pos_toks = self.pipe.tokenizer(pos_prompt, return_tensors="pt", padding="max_length", truncation=True,
51
+ max_length=self.pipe.tokenizer.model_max_length).input_ids.to(self.pipe.device)
52
  neg_toks = self.pipe.tokenizer(neg_prompt, return_tensors="pt", padding="max_length", truncation=True,
53
+ max_length=self.pipe.tokenizer.model_max_length).input_ids.to(self.pipe.device)
54
  pos = self.pipe.text_encoder(pos_toks).pooler_output
55
  neg = self.pipe.text_encoder(neg_toks).pooler_output
56
  positives.append(pos)
 
82
 
83
  with torch.no_grad():
84
  toks = self.pipe.tokenizer(prompt, return_tensors="pt", padding="max_length", truncation=True,
85
+ max_length=self.pipe.tokenizer.model_max_length).input_ids.to(self.pipe.device)
86
  prompt_embeds = self.pipe.text_encoder(toks).last_hidden_state
87
 
88
  if avg_diff_2nd and normalize_scales:
 
303
  neg_prompt = f"a {medium} of a {opposite} {subject}"
304
 
305
  pos_toks = self.pipe.tokenizer(pos_prompt, return_tensors="pt", padding="max_length", truncation=True,
306
+ max_length=self.pipe.tokenizer.model_max_length).input_ids.to(self.pipe.device)
307
  neg_toks = self.pipe.tokenizer(neg_prompt, return_tensors="pt", padding="max_length", truncation=True,
308
+ max_length=self.pipe.tokenizer.model_max_length).input_ids.to(self.pipe.device)
309
  pos = self.pipe.text_encoder(pos_toks).text_embeds
310
  neg = self.pipe.text_encoder(neg_toks).text_embeds
311
  positives.append(pos)
312
  negatives.append(neg)
313
 
314
  pos_toks2 = self.pipe.tokenizer_2(pos_prompt, return_tensors="pt", padding="max_length", truncation=True,
315
+ max_length=self.pipe.tokenizer_2.model_max_length).input_ids.to(self.pipe.device)
316
  neg_toks2 = self.pipe.tokenizer_2(neg_prompt, return_tensors="pt", padding="max_length", truncation=True,
317
+ max_length=self.pipe.tokenizer_2.model_max_length).input_ids.to(self.pipe.device)
318
  pos2 = self.pipe.text_encoder_2(pos_toks2).text_embeds
319
  neg2 = self.pipe.text_encoder_2(neg_toks2).text_embeds
320
  positives2.append(pos2)