DongfuJiang committed
Commit 8e90055
2 Parent(s): ba2995a 992ef30

Merge branch 'main' of https://huggingface.co/spaces/TIGER-Lab/GenAI-Arena

arena_elo/edition_model_info.json CHANGED
@@ -41,7 +41,7 @@
   },
   "InfEdit": {
     "Link": "https://huggingface.co/spaces/sled-umich/InfEdit",
-    "License": "Apache-2.0",
+    "License": "CC BY-NC-ND 4.0",
     "Organization": "University of Michigan, University of California, Berkeley"
   }
 }
model/fetch_museum_results/__init__.py CHANGED
@@ -2,8 +2,8 @@ from .imagen_museum import TASK_DICT, DOMAIN
 from .imagen_museum import fetch_indexes, fetch_indexes_no_csv
 import random
 
-ARENA_TO_IG_MUSEUM = {"LCM(v1.5/XL)":"LCM",
-                      "PlayGroundV2.5": "PlayGroundV2_5"}
+ARENA_TO_IG_MUSEUM = {"LCM(v1.5/XL)":"LCM", "PlayGroundV2.5": "PlayGroundV2_5"}
+ARENA_TO_VG_MUSEUM = {"StableVideoDiffusion": "FastSVD"}
 
 def draw2_from_imagen_museum(task, model_name1, model_name2):
     task_name = TASK_DICT[task]
@@ -61,6 +61,9 @@ def draw2_from_videogen_museum(task, model_name1, model_name2):
     domain = "https://github.com/ChromAIca/VideoGenMuseum/raw/main/Museum/"
     baselink = domain + "VideoGenHub_Text-Guided_VG"
 
+    model_name1 = ARENA_TO_VG_MUSEUM[model_name1] if model_name1 in ARENA_TO_VG_MUSEUM else model_name1
+    model_name2 = ARENA_TO_VG_MUSEUM[model_name2] if model_name2 in ARENA_TO_VG_MUSEUM else model_name2
+
     matched_results = fetch_indexes_no_csv(baselink)
     r = random.Random()
     uid, value = r.choice(list(matched_results.items()))
@@ -77,6 +80,8 @@ def draw_from_videogen_museum(task, model_name):
     domain = "https://github.com/ChromAIca/VideoGenMuseum/raw/main/Museum/"
     baselink = domain + "VideoGenHub_Text-Guided_VG"
 
+    model_name = ARENA_TO_VG_MUSEUM[model_name] if model_name in ARENA_TO_VG_MUSEUM else model_name
+
     matched_results = fetch_indexes_no_csv(baselink)
     r = random.Random()
     uid, value = r.choice(list(matched_results.items()))
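
Reviewer note: the added `X[k] if k in X else k` lookups work, but `dict.get` with a default expresses the same fallback more compactly. A minimal sketch of the equivalent lines (not part of the commit):

    model_name1 = ARENA_TO_VG_MUSEUM.get(model_name1, model_name1)  # museum alias if mapped, else unchanged
    model_name2 = ARENA_TO_VG_MUSEUM.get(model_name2, model_name2)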
model/model_manager.py CHANGED
@@ -5,7 +5,7 @@ import requests
 import io, base64, json
 import spaces
 from PIL import Image
-from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, load_pipeline
+from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, MUSEUM_UNSUPPORTED_MODELS, load_pipeline
 from .fetch_museum_results import draw_from_imagen_museum, draw2_from_imagen_museum, draw_from_videogen_museum, draw2_from_videogen_museum
 
 class ModelManager:
@@ -13,6 +13,7 @@ class ModelManager:
         self.model_ig_list = IMAGE_GENERATION_MODELS
         self.model_ie_list = IMAGE_EDITION_MODELS
         self.model_vg_list = VIDEO_GENERATION_MODELS
+        self.excluding_model_list = MUSEUM_UNSUPPORTED_MODELS
         self.loaded_models = {}
 
     def load_model_pipe(self, model_name):
@@ -44,8 +45,10 @@ class ModelManager:
 
 
     def generate_image_ig_parallel_anony(self, prompt, model_A, model_B):
+        # Using list comprehension to get the difference between two lists
+        picking_list = [item for item in self.model_ig_list if item not in self.excluding_model_list]
         if model_A == "" and model_B == "":
-            model_names = random.sample([model for model in self.model_ig_list], 2)
+            model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
 
@@ -56,8 +59,10 @@ class ModelManager:
         return results[0], results[1], model_names[0], model_names[1]
 
     def generate_image_ig_museum_parallel_anony(self, model_A, model_B):
+        # Using list comprehension to get the difference between two lists
+        picking_list = [item for item in self.model_ig_list if item not in self.excluding_model_list]
         if model_A == "" and model_B == "":
-            model_names = random.sample([model for model in self.model_ig_list], 2)
+            model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
 
@@ -125,8 +130,10 @@ class ModelManager:
         return image_links[0], image_links[1], image_links[2], prompt_list[0], prompt_list[1], prompt_list[2]
 
     def generate_image_ie_parallel_anony(self, textbox_source, textbox_target, textbox_instruct, source_image, model_A, model_B):
+        # Using list comprehension to get the difference between two lists
+        picking_list = [item for item in self.model_ie_list if item not in self.excluding_model_list]
         if model_A == "" and model_B == "":
-            model_names = random.sample([model for model in self.model_ie_list], 2)
+            model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
         with concurrent.futures.ThreadPoolExecutor() as executor:
@@ -135,8 +142,10 @@ class ModelManager:
         return results[0], results[1], model_names[0], model_names[1]
 
     def generate_image_ie_museum_parallel_anony(self, model_A, model_B):
+        # Using list comprehension to get the difference between two lists
+        picking_list = [item for item in self.model_ie_list if item not in self.excluding_model_list]
         if model_A == "" and model_B == "":
-            model_names = random.sample([model for model in self.model_ie_list], 2)
+            model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
         with concurrent.futures.ThreadPoolExecutor() as executor:
@@ -169,8 +178,10 @@ class ModelManager:
         return video_link, prompt
 
     def generate_video_vg_parallel_anony(self, prompt, model_A, model_B):
+        # Using list comprehension to get the difference between two lists
+        picking_list = [item for item in self.model_vg_list if item not in self.excluding_model_list]
         if model_A == "" and model_B == "":
-            model_names = random.sample([model for model in self.model_vg_list], 2)
+            model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
 
@@ -181,8 +192,10 @@ class ModelManager:
         return results[0], results[1], model_names[0], model_names[1]
 
     def generate_video_vg_museum_parallel_anony(self, model_A, model_B):
+        # Using list comprehension to get the difference between two lists
+        picking_list = [item for item in self.model_vg_list if item not in self.excluding_model_list]
         if model_A == "" and model_B == "":
-            model_names = random.sample([model for model in self.model_vg_list], 2)
+            model_names = random.sample([model for model in picking_list], 2)
         else:
             model_names = [model_A, model_B]
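
Reviewer note: the same exclusion-and-sampling block now appears verbatim in all six *_parallel_anony methods. A possible follow-up refactor (a sketch only; `_sample_anonymous_pair` is a hypothetical name, not in the commit) would centralize it:

    def _sample_anonymous_pair(self, model_list, model_A, model_B):
        # Pick two distinct models at random, skipping museum-unsupported
        # entries, unless the caller already named a pair.
        if model_A == "" and model_B == "":
            picking_list = [m for m in model_list if m not in self.excluding_model_list]
            return random.sample(picking_list, 2)
        return [model_A, model_B]

Each method would then reduce to, e.g., `model_names = self._sample_anonymous_pair(self.model_ig_list, model_A, model_B)`.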
 
model/models/__init__.py CHANGED
@@ -7,7 +7,7 @@ from .videogenhub_models import load_videogenhub_model
 # IMAGE_GENERATION_MODELS = ['fal_LCM(v1.5/XL)_text2image','fal_SDXLTurbo_text2image','fal_SDXL_text2image', 'imagenhub_PixArtAlpha_generation', 'fal_PixArtSigma_text2image',
 #                            'imagenhub_OpenJourney_generation','fal_SDXLLightning_text2image', 'fal_StableCascade_text2image',
 #                            'playground_PlayGroundV2_generation', 'playground_PlayGroundV2.5_generation']
-IMAGE_GENERATION_MODELS = ['imagenhub_SDXLTurbo_generation','imagenhub_SDXL_generation', 'imagenhub_PixArtAlpha_generation',
+IMAGE_GENERATION_MODELS = ['imagenhub_SDXLTurbo_generation','imagenhub_SDXL_generation', 'imagenhub_PixArtAlpha_generation', 'imagenhub_PixArtSigma_generation',
                            'imagenhub_OpenJourney_generation','imagenhub_SDXLLightning_generation', 'imagenhub_StableCascade_generation',
                            'playground_PlayGroundV2_generation', 'playground_PlayGroundV2.5_generation']
 IMAGE_EDITION_MODELS = ['imagenhub_CycleDiffusion_edition', 'imagenhub_Pix2PixZero_edition', 'imagenhub_Prompt2prompt_edition',
@@ -16,10 +16,12 @@ IMAGE_EDITION_MODELS = ['imagenhub_CycleDiffusion_edition', 'imagenhub_Pix2PixZe
                         'imagenhub_InfEdit_edition', 'imagenhub_CosXLEdit_edition']
 VIDEO_GENERATION_MODELS = ['fal_AnimateDiff_text2video',
                            'fal_AnimateDiffTurbo_text2video',
-                           #'fal_StableVideoDiffusion_text2video',
-                           'videogenhub_LaVie_generation', 'videogenhub_VideoCrafter2_generation',
-                           'videogenhub_ModelScope_generation', 'videogenhub_OpenSora_generation']
-
+                           'fal_StableVideoDiffusion_text2video',
+                           'videogenhub_LaVie_generation',
+                           'videogenhub_VideoCrafter2_generation',
+                           'videogenhub_ModelScope_generation',
+                           'videogenhub_OpenSora_generation', 'videogenhub_T2VTurbo_generation']
+MUSEUM_UNSUPPORTED_MODELS = ['videogenhub_OpenSoraPlan_generation']
 
 def load_pipeline(model_name):
     """
requirements.txt CHANGED
@@ -66,8 +66,9 @@ pyav
 pyarrow
 tensorboard
 timm
-pandarallel
 wandb
+pandarallel
+
 
 
 