# K-Sort-Arena/model/models/__init__.py
from .huggingface_models import load_huggingface_model
from .replicate_api_models import load_replicate_model
from .openai_api_models import load_openai_model
from .other_api_models import load_other_model

# Text-to-image models available in the arena, named {source}_{name}_{type}.
IMAGE_GENERATION_MODELS = [
'replicate_SDXL_text2image',
'replicate_SD-v3.0_text2image',
'replicate_SD-v2.1_text2image',
'replicate_SD-v1.5_text2image',
'replicate_SDXL-Lightning_text2image',
'replicate_Kandinsky-v2.0_text2image',
'replicate_Kandinsky-v2.2_text2image',
'replicate_Proteus-v0.2_text2image',
'replicate_Playground-v2.0_text2image',
'replicate_Playground-v2.5_text2image',
'replicate_Dreamshaper-xl-turbo_text2image',
'replicate_SDXL-Deepcache_text2image',
'replicate_Openjourney-v4_text2image',
'replicate_LCM-v1.5_text2image',
'replicate_Realvisxl-v3.0_text2image',
'replicate_Realvisxl-v2.0_text2image',
'replicate_Pixart-Sigma_text2image',
'replicate_SSD-1b_text2image',
'replicate_Open-Dalle-v1.1_text2image',
'replicate_Deepfloyd-IF_text2image',
'huggingface_SD-turbo_text2image',
'huggingface_SDXL-turbo_text2image',
'huggingface_Stable-cascade_text2image',
'openai_Dalle-2_text2image',
'openai_Dalle-3_text2image',
'other_Midjourney-v6.0_text2image',
'other_Midjourney-v5.0_text2image',
"replicate_FLUX.1-schnell_text2image",
"replicate_FLUX.1-pro_text2image",
"replicate_FLUX.1-dev_text2image",
]

# Text-to-video models available in the arena, named {source}_{name}_{type}.
VIDEO_GENERATION_MODELS = [
    'replicate_Zeroscope-v2-xl_text2video',
    'replicate_Animate-Diff_text2video',
    'replicate_OpenSora_text2video',
    'replicate_LaVie_text2video',
    'replicate_VideoCrafter2_text2video',
    'replicate_Stable-Video-Diffusion_text2video',
    'other_Runway-Gen3_text2video',
    'other_Pika-beta_text2video',
    'other_Pika-v1.0_text2video',
    'other_Runway-Gen2_text2video',
    'other_Sora_text2video',
]


def load_pipeline(model_name):
    """
    Load a model pipeline based on the model name.

    Args:
        model_name (str): The name of the model to load, in the form {source}_{name}_{type}.

    Returns:
        The pipeline object returned by the loader for the given source.
    """
    model_source, model_name, model_type = model_name.split("_")
    if model_source == "replicate":
        pipe = load_replicate_model(model_name, model_type)
    elif model_source == "huggingface":
        pipe = load_huggingface_model(model_name, model_type)
    elif model_source == "openai":
        pipe = load_openai_model(model_name, model_type)
    elif model_source == "other":
        pipe = load_other_model(model_name, model_type)
    else:
        raise ValueError(f"Model source {model_source} not supported")
    return pipe
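

# Illustrative usage sketch (not part of the original module): shows how load_pipeline()
# splits an entry from IMAGE_GENERATION_MODELS into its {source}_{name}_{type} parts and
# dispatches to the matching loader. Running it requires the credentials expected by the
# underlying loader (here, the Replicate API client).
if __name__ == "__main__":
    # 'replicate_SDXL_text2image' splits into ('replicate', 'SDXL', 'text2image'),
    # so load_replicate_model('SDXL', 'text2image') is called under the hood.
    example_model = IMAGE_GENERATION_MODELS[0]
    pipeline = load_pipeline(example_model)
    print(f"Loaded pipeline for {example_model}: {pipeline}")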