vinesmsuic committed
Commit 868a596
1 Parent(s): 77d3539

simple fix to disable museum-unsupported models

Files changed (2)
  1. model/model_manager.py +20 -7
  2. model/models/__init__.py +1 -1
model/model_manager.py CHANGED
@@ -5,7 +5,7 @@ import requests
 import io, base64, json
 import spaces
 from PIL import Image
-from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, load_pipeline
+from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, MUSEUM_UNSUPPORTED_MODELS, load_pipeline
 from .fetch_museum_results import draw_from_imagen_museum, draw2_from_imagen_museum, draw_from_videogen_museum, draw2_from_videogen_museum
 
 class ModelManager:
@@ -13,6 +13,7 @@ class ModelManager:
         self.model_ig_list = IMAGE_GENERATION_MODELS
         self.model_ie_list = IMAGE_EDITION_MODELS
         self.model_vg_list = VIDEO_GENERATION_MODELS
+        self.excluding_model_list = MUSEUM_UNSUPPORTED_MODELS
         self.loaded_models = {}
 
     def load_model_pipe(self, model_name):
@@ -44,8 +45,10 @@ class ModelManager:
 
 
     def generate_image_ig_parallel_anony(self, prompt, model_A, model_B):
+        # Using list comprehension to get the difference between two lists
+        picking_list = [item for item in self.model_ig_list if item not in self.excluding_model_list]
         if model_A == "" and model_B == "":
-            model_names = random.sample([model for model in self.model_ig_list], 2)
+            model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
 
@@ -56,8 +59,10 @@ class ModelManager:
         return results[0], results[1], model_names[0], model_names[1]
 
     def generate_image_ig_museum_parallel_anony(self, model_A, model_B):
+        # Using list comprehension to get the difference between two lists
+        picking_list = [item for item in self.model_ig_list if item not in self.excluding_model_list]
         if model_A == "" and model_B == "":
-            model_names = random.sample([model for model in self.model_ig_list], 2)
+            model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
 
@@ -125,8 +130,10 @@ class ModelManager:
         return image_links[0], image_links[1], image_links[2], prompt_list[0], prompt_list[1], prompt_list[2]
 
     def generate_image_ie_parallel_anony(self, textbox_source, textbox_target, textbox_instruct, source_image, model_A, model_B):
+        # Using list comprehension to get the difference between two lists
+        picking_list = [item for item in self.model_ie_list if item not in self.excluding_model_list]
         if model_A == "" and model_B == "":
-            model_names = random.sample([model for model in self.model_ie_list], 2)
+            model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
         with concurrent.futures.ThreadPoolExecutor() as executor:
@@ -135,8 +142,10 @@ class ModelManager:
         return results[0], results[1], model_names[0], model_names[1]
 
     def generate_image_ie_museum_parallel_anony(self, model_A, model_B):
+        # Using list comprehension to get the difference between two lists
+        picking_list = [item for item in self.model_ie_list if item not in self.excluding_model_list]
         if model_A == "" and model_B == "":
-            model_names = random.sample([model for model in self.model_ie_list], 2)
+            model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
         with concurrent.futures.ThreadPoolExecutor() as executor:
@@ -169,8 +178,10 @@ class ModelManager:
         return video_link, prompt
 
     def generate_video_vg_parallel_anony(self, prompt, model_A, model_B):
+        # Using list comprehension to get the difference between two lists
+        picking_list = [item for item in self.model_vg_list if item not in self.excluding_model_list]
         if model_A == "" and model_B == "":
-            model_names = random.sample([model for model in self.model_vg_list], 2)
+            model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
 
@@ -181,8 +192,10 @@ class ModelManager:
         return results[0], results[1], model_names[0], model_names[1]
 
     def generate_video_vg_museum_parallel_anony(self, model_A, model_B):
+        # Using list comprehension to get the difference between two lists
+        picking_list = [item for item in self.model_vg_list if item not in self.excluding_model_list]
         if model_A == "" and model_B == "":
-            model_names = random.sample([model for model in self.model_vg_list], 2)
+            model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
 
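The same filter-then-sample pattern is repeated in all six anonymous-battle methods above. Below is a minimal, self-contained sketch of that pattern, not code from the repository: the list contents and the pick_anonymous_pair helper are placeholders for illustration only.

import random

# Placeholder lists; the real ones are defined in model/models/__init__.py.
IMAGE_GENERATION_MODELS = ["model_a", "model_b", "model_c", "model_d"]
MUSEUM_UNSUPPORTED_MODELS = ["model_c"]

def pick_anonymous_pair(model_A="", model_B=""):
    # Difference of the two lists: only museum-supported models stay eligible.
    picking_list = [m for m in IMAGE_GENERATION_MODELS if m not in MUSEUM_UNSUPPORTED_MODELS]
    if model_A == "" and model_B == "":
        # Anonymous battle: draw two distinct models at random (sampling is without replacement).
        model_names = random.sample(picking_list, 2)
    else:
        # Named battle: keep the caller's explicit choices.
        model_names = [model_A, model_B]
    return model_names

print(pick_anonymous_pair())                      # e.g. ['model_d', 'model_a']
print(pick_anonymous_pair("model_a", "model_c"))  # ['model_a', 'model_c']

Note that the exclusion happens before the empty-string check, so explicitly selected models still bypass the filter; only the random anonymous draw is restricted.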
model/models/__init__.py CHANGED
@@ -21,7 +21,7 @@ VIDEO_GENERATION_MODELS = ['fal_AnimateDiff_text2video',
                            'videogenhub_VideoCrafter2_generation',
                            'videogenhub_ModelScope_generation',
                            'videogenhub_OpenSora_generation']
-
+MUSEUM_UNSUPPORTED_MODELS = ['videogenhub_T2VTurbo_generation', 'videogenhub_OpenSoraPlan_generation']
 
 def load_pipeline(model_name):
     """