Commit: 58d3dab
1 Parent(s): 85dd753
Commit message: commit

Files changed:
- README.md +2 -0
- data/taxonomy_3d-model_creation.jsonl +3 -0
- data/taxonomy_3d-model_editing.jsonl +4 -0
- data/taxonomy_audio_creation.jsonl +7 -0
- data/taxonomy_audio_editing.jsonl +7 -0
- data/taxonomy_image_creation.jsonl +6 -0
- data/taxonomy_image_editing.jsonl +11 -0
- data/taxonomy_text_creation.jsonl +9 -0
- data/taxonomy_text_editing.jsonl +4 -0
- data/taxonomy_video_creation.jsonl +5 -0
- data/taxonomy_video_editing.jsonl +7 -0
- data/train.jsonl +63 -0
- taxonomy/3d-generation/creation/modalities.json +76 -0
- taxonomy/3d-generation/editing/modalities.json +103 -2
- taxonomy/MODALITY_INDEX.md +132 -0
- taxonomy/audio-generation/creation/modalities.json +187 -0
- taxonomy/audio-generation/editing/modalities.json +187 -0
- taxonomy/image-generation/creation/modalities.json +154 -0
- taxonomy/image-generation/editing/modalities.json +287 -0
- taxonomy/text-generation/creation/modalities.json +226 -2
- taxonomy/text-generation/editing/modalities.json +103 -2
- taxonomy/video-generation/creation/modalities.json +147 -0
- taxonomy/video-generation/editing/modalities.json +211 -0
README.md CHANGED
@@ -2,11 +2,13 @@
 language:
 - en
 license: cc0-1.0
+
 size_categories:
 - n<1K
 task_categories:
 - other
 pretty_name: Multimodal AI Taxonomy
+short_description: Exploring a multimodal AI taxonomy
 tags:
 - multimodal
 - taxonomy
data/taxonomy_3d-model_creation.jsonl CHANGED
@@ -1,2 +1,5 @@
{"id": "text-to-3d", "name": "Text to 3D Model", "input_primary": "text", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-synthesis\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["3D asset generation", "Rapid prototyping", "Game asset creation"], "metadata_platforms": ["Replicate", "Meshy", "3DFY"], "metadata_example_models": ["Point-E", "Shap-E", "DreamFusion"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
{"id": "img-to-3d", "name": "Image to 3D Model", "input_primary": "image", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-reconstruction\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["3D reconstruction", "Object digitization", "Asset creation from photos"], "metadata_platforms": ["Replicate", "Meshy", "Luma AI"], "metadata_example_models": ["Zero-1-to-3", "Wonder3D"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
+
{"id": "multimodal-img-to-3d", "name": "Multi-Image to 3D Model", "input_primary": "image", "input_secondary": ["image"], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-reconstruction\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Photogrammetry", "3D scanning", "Object reconstruction"], "metadata_platforms": ["Luma AI", "Polycam", "Reality Capture"], "metadata_example_models": ["NeRF", "Gaussian Splatting", "Photogrammetry"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
+
{"id": "vid-to-3d", "name": "Video to 3D Model", "input_primary": "video", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-reconstruction\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video-based reconstruction", "Motion to 3D", "Scene capture"], "metadata_platforms": ["Luma AI", "Polycam"], "metadata_example_models": ["NeRF", "Gaussian Splatting"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
+
{"id": "text-img-to-3d", "name": "Text + Image to 3D Model", "input_primary": "text", "input_secondary": ["image"], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-synthesis\", \"guidanceType\": \"text-and-visual\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Guided 3D generation", "Controlled asset creation", "Reference-based modeling"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
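Each row in these JSONL files is one flat JSON object; the characteristics and relationships fields are themselves JSON objects serialized as strings. As an illustrative sketch (not part of the commit; the load_taxonomy helper and its field handling are assumptions based on the rows shown here), a file such as data/taxonomy_3d-model_creation.jsonl can be read and those nested fields decoded like this:

```python
import json
from pathlib import Path

def load_taxonomy(path):
    """Read one taxonomy JSONL file and decode its stringified JSON fields."""
    records = []
    for line in Path(path).read_text(encoding="utf-8").splitlines():
        if not line.strip():
            continue
        record = json.loads(line)
        # "characteristics" and "relationships" are JSON objects stored as strings
        # in the rows above; fall back to "{}" if a row leaves them empty.
        record["characteristics"] = json.loads(record["characteristics"] or "{}")
        record["relationships"] = json.loads(record["relationships"] or "{}")
        records.append(record)
    return records

if __name__ == "__main__":
    for row in load_taxonomy("data/taxonomy_3d-model_creation.jsonl"):
        print(row["id"], "->", row["characteristics"].get("generationType"))
```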
data/taxonomy_3d-model_editing.jsonl ADDED
@@ -0,0 +1,4 @@
+
{"id": "3d-to-3d-optimization", "name": "3D Model Optimization", "input_primary": "3d-model", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"enhancement\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Polygon reduction", "LOD generation", "Performance optimization"], "metadata_platforms": ["Blender", "Maya", "Simplygon"], "metadata_example_models": [], "relationships": "{}", "output_modality": "3d-model", "operation_type": "editing"}
+
{"id": "3d-to-3d-texturing", "name": "3D Model Texturing", "input_primary": "3d-model", "input_secondary": ["text"], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Texture generation", "Material application", "PBR material creation"], "metadata_platforms": ["Experimental", "Adobe Substance"], "metadata_example_models": ["TEXTure", "Text2Tex"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "editing"}
+
{"id": "3d-to-3d-rigging", "name": "3D Model Rigging", "input_primary": "3d-model", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Auto-rigging", "Skeleton generation", "Animation preparation"], "metadata_platforms": ["Mixamo", "AccuRIG"], "metadata_example_models": ["Mixamo Auto-Rigger"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "editing"}
+
{"id": "3d-to-3d-style-transfer", "name": "3D Style Transfer", "input_primary": "3d-model", "input_secondary": ["text", "image"], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Artistic 3D styling", "Model transformation", "Creative effects"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "3d-model", "operation_type": "editing"}
data/taxonomy_audio_creation.jsonl CHANGED
@@ -1,3 +1,10 @@
{"id": "text-to-audio", "name": "Text to Audio", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"general\", \"audioCategories\": [\"speech\", \"sound-effects\", \"music\", \"ambient\"]}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Sound effect generation", "Voiceover creation", "Audio asset production"], "metadata_platforms": ["Replicate", "ElevenLabs", "AudioCraft"], "metadata_example_models": ["AudioGen", "MusicGen"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
{"id": "text-to-speech", "name": "Text to Speech", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "speech", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"speech\", \"voiceCloning\": false}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Narration", "Accessibility", "Voice assistants"], "metadata_platforms": ["ElevenLabs", "Google Cloud", "Azure", "AWS"], "metadata_example_models": ["ElevenLabs", "Google WaveNet", "Azure Neural TTS"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
{"id": "text-to-music", "name": "Text to Music", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"music\", \"melodic\": true}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Background music generation", "Musical composition", "Soundtrack creation"], "metadata_platforms": ["Replicate", "Stability AI"], "metadata_example_models": ["MusicGen", "Stable Audio"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
+
{"id": "text-to-speech-voice-clone", "name": "Text to Speech (Voice Cloning)", "input_primary": "text", "input_secondary": ["audio"], "output_primary": "audio", "output_audio": false, "output_audio_type": "speech", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"speech\", \"voiceCloning\": true}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Custom voice synthesis", "Personalized narration", "Voice preservation"], "metadata_platforms": ["ElevenLabs", "Replicate", "PlayHT"], "metadata_example_models": ["ElevenLabs", "XTTS", "Bark"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
+
{"id": "text-to-sound-effects", "name": "Text to Sound Effects", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "sound-effects", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"sound-effects\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["SFX generation", "Foley creation", "Game audio"], "metadata_platforms": ["Replicate", "AudioCraft"], "metadata_example_models": ["AudioGen", "AudioLDM"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
+
{"id": "img-to-audio", "name": "Image to Audio", "input_primary": "image", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"general\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Image sonification", "Scene audio generation", "Accessibility"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
+
{"id": "music-to-music-style", "name": "Music Style Transfer", "input_primary": "music", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"transformation\", \"audioType\": \"music\", \"melodic\": true}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Genre transformation", "Instrument swap", "Musical reimagining"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
+
{"id": "vid-to-audio-extraction", "name": "Video to Audio Extraction", "input_primary": "video", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"transformation\", \"audioType\": \"general\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Audio extraction", "Soundtrack isolation", "Voice extraction"], "metadata_platforms": ["FFmpeg", "Standard tools"], "metadata_example_models": [], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
+
{"id": "humming-to-music", "name": "Humming to Music", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"music\", \"melodic\": true}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Melody to full track", "Musical idea development", "Composition assistance"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
+
{"id": "lyrics-to-music", "name": "Lyrics to Music", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"music\", \"melodic\": true, \"voiceCloning\": false}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Song generation", "Music composition", "Vocal track creation"], "metadata_platforms": ["Suno", "Udio", "Replicate"], "metadata_example_models": ["Suno", "Udio", "MusicGen"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
data/taxonomy_audio_editing.jsonl CHANGED
@@ -1,2 +1,9 @@
{"id": "audio-to-audio-inpainting", "name": "Audio to Audio (Inpainting)", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"inpainting\", \"modification\": \"selective-editing\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Audio editing", "Sound design", "Audio restoration"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
{"id": "music-to-music-inpainting", "name": "Music to Music (Inpainting)", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"inpainting\", \"modification\": \"selective-editing\", \"melodic\": true, \"audioSubtype\": \"music\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Music editing", "Compositional modifications", "Arrangement changes"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{\"parent\": \"audio-to-audio-inpainting\", \"note\": \"Music inpainting is a specialized subset of audio inpainting\"}", "output_modality": "audio", "operation_type": "editing"}
+
{"id": "audio-to-audio-enhancement", "name": "Audio Enhancement", "input_primary": "audio", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"enhancement\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Noise reduction", "Quality improvement", "Audio cleanup"], "metadata_platforms": ["Adobe", "iZotope", "Replicate"], "metadata_example_models": ["Adobe Podcast", "Krisp", "Denoiser"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
+
{"id": "audio-to-audio-restoration", "name": "Audio Restoration", "input_primary": "audio", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"enhancement\", \"modification\": \"restoration\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Historical recording restoration", "Audio artifact removal", "Damaged audio repair"], "metadata_platforms": ["iZotope", "Adobe", "Accusonus"], "metadata_example_models": ["iZotope RX", "Adobe Audition"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
+
{"id": "audio-to-audio-voice-conversion", "name": "Voice Conversion", "input_primary": "audio", "input_secondary": ["audio"], "output_primary": "audio", "output_audio": false, "output_audio_type": "speech", "characteristics": "{\"processType\": \"transformation\", \"audioType\": \"speech\", \"voiceCloning\": true, \"modification\": \"transformation\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Voice swapping", "Dubbing", "Voice translation"], "metadata_platforms": ["ElevenLabs", "Respeecher", "Replicate"], "metadata_example_models": ["RVC", "So-VITS-SVC", "Respeecher"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
+
{"id": "music-to-music-stem-separation", "name": "Music Stem Separation", "input_primary": "music", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"transformation\", \"audioType\": \"music\", \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Vocal isolation", "Instrument extraction", "Remixing", "Karaoke creation"], "metadata_platforms": ["Spleeter", "Demucs", "Replicate"], "metadata_example_models": ["Demucs", "Spleeter", "Ultimate Vocal Remover"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
+
{"id": "audio-to-audio-speed-change", "name": "Audio Speed/Pitch Modification", "input_primary": "audio", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Tempo adjustment", "Pitch shifting", "Time stretching"], "metadata_platforms": ["Standard tools", "Adobe"], "metadata_example_models": ["Rubber Band", "Paulstretch"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
+
{"id": "music-to-music-mastering", "name": "AI Music Mastering", "input_primary": "music", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"enhancement\", \"audioType\": \"music\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Automated mastering", "Mix enhancement", "Final polish"], "metadata_platforms": ["LANDR", "iZotope", "eMastered"], "metadata_example_models": ["LANDR", "iZotope Ozone"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
+
{"id": "audio-to-audio-spatial", "name": "Spatial Audio Conversion", "input_primary": "audio", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Stereo to spatial", "Binaural conversion", "3D audio creation"], "metadata_platforms": ["Dolby", "Sony", "Experimental"], "metadata_example_models": ["Dolby Atmos", "Sony 360 Reality Audio"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
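One of the rows above, music-to-music-inpainting, uses the relationships field to declare a parent entry (audio-to-audio-inpainting). A minimal sketch of extracting such parent links, assuming relationships remains a stringified JSON object with an optional parent key as in that row (the parent_links helper is illustrative, not part of the dataset):

```python
import json

def parent_links(path):
    """Map record id -> declared parent id for rows that set one."""
    links = {}
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            if not line.strip():
                continue
            record = json.loads(line)
            relationships = json.loads(record["relationships"] or "{}")
            if "parent" in relationships:
                links[record["id"]] = relationships["parent"]
    return links

# On the file above this should yield:
# {"music-to-music-inpainting": "audio-to-audio-inpainting"}
print(parent_links("data/taxonomy_audio_editing.jsonl"))
```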
data/taxonomy_image_creation.jsonl CHANGED
@@ -1 +1,7 @@
{"id": "text-to-img", "name": "Text to Image", "input_primary": "text", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"synthesis\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Concept art generation", "Product mockups", "Marketing assets"], "metadata_platforms": ["Replicate", "Stability AI", "Midjourney", "DALL-E"], "metadata_example_models": ["Stable Diffusion", "DALL-E 3", "Midjourney"], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
+
{"id": "text-img-to-img", "name": "Text + Image to Image", "input_primary": "text", "input_secondary": ["image"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"synthesis\", \"guidanceType\": \"text-and-visual\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Image-guided generation", "Style reference", "Composition guidance"], "metadata_platforms": ["Replicate", "Stability AI", "Midjourney"], "metadata_example_models": ["Stable Diffusion with ControlNet", "DALL-E 3", "Midjourney"], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
+
{"id": "img-to-img-upscale", "name": "Image Upscaling", "input_primary": "image", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"enhancement\", \"generationType\": \"synthesis\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Resolution enhancement", "Quality improvement", "Detail enhancement"], "metadata_platforms": ["Topaz", "Replicate", "Stability AI"], "metadata_example_models": ["Real-ESRGAN", "Topaz Gigapixel", "SUPIR"], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
+
{"id": "vid-to-img-frame-extraction", "name": "Video to Image (Frame Extraction)", "input_primary": "video", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"generationType\": \"synthesis\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Frame extraction", "Thumbnail generation", "Video analysis"], "metadata_platforms": ["FFmpeg", "Standard tools"], "metadata_example_models": [], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
+
{"id": "3d-to-img-render", "name": "3D to Image (Rendering)", "input_primary": "3d-model", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"rendering\", \"renderType\": \"3d-rendering\", \"generationType\": \"synthesis\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Product rendering", "3D visualization", "Architectural rendering"], "metadata_platforms": ["Blender", "Unreal Engine", "Unity"], "metadata_example_models": [], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
+
{"id": "audio-to-img-visualization", "name": "Audio to Image (Visualization)", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"audioVisualization\": true, \"generationType\": \"synthesis\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Album art generation", "Sound visualization", "Music imagery"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
+
{"id": "sketch-to-img", "name": "Sketch to Image", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"synthesis\", \"guidanceType\": \"text-and-visual\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Sketch refinement", "Concept development", "Design exploration"], "metadata_platforms": ["Replicate", "Stability AI"], "metadata_example_models": ["ControlNet Scribble", "Pix2Pix"], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
data/taxonomy_image_editing.jsonl CHANGED
@@ -1 +1,12 @@
{"id": "img-to-img", "name": "Image to Image", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\", \"enhancement\", \"editing\", \"inpainting\"]}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Image editing", "Style transfer", "Image enhancement", "Object removal/addition"], "metadata_platforms": ["Replicate", "Stability AI", "Midjourney"], "metadata_example_models": ["Stable Diffusion img2img", "ControlNet"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
+
{"id": "img-to-img-inpainting", "name": "Image Inpainting", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"inpainting\", \"transformationTypes\": [\"inpainting\"], \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Object removal", "Background extension", "Image repair", "Content-aware fill"], "metadata_platforms": ["Replicate", "Adobe", "Stability AI"], "metadata_example_models": ["Stable Diffusion Inpainting", "LaMa", "Adobe Firefly"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
+
{"id": "img-to-img-outpainting", "name": "Image Outpainting", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"transformationTypes\": [\"editing\"], \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Canvas extension", "Image expansion", "Background generation"], "metadata_platforms": ["Replicate", "DALL-E", "Stability AI"], "metadata_example_models": ["Stable Diffusion Outpainting", "DALL-E Outpainting"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
+
{"id": "img-to-img-style-transfer", "name": "Image Style Transfer", "input_primary": "image", "input_secondary": ["image", "text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\"], \"modification\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Artistic style application", "Photo stylization", "Creative filters"], "metadata_platforms": ["Replicate", "Stability AI"], "metadata_example_models": ["StyleGAN", "Neural Style Transfer", "InstantStyle"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
+
{"id": "img-to-img-colorization", "name": "Image Colorization", "input_primary": "image", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"enhancement\"], \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Black and white colorization", "Historical photo restoration", "Photo enhancement"], "metadata_platforms": ["Replicate", "DeOldify"], "metadata_example_models": ["DeOldify", "Colorful Image Colorization"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
+
{"id": "img-to-img-enhancement", "name": "Image Enhancement", "input_primary": "image", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"enhancement\", \"transformationTypes\": [\"enhancement\"], \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Quality improvement", "Noise reduction", "Sharpening", "Dynamic range enhancement"], "metadata_platforms": ["Topaz", "Adobe", "Replicate"], "metadata_example_models": ["Topaz Photo AI", "Adobe Enhance"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
+
{"id": "img-to-img-restoration", "name": "Image Restoration", "input_primary": "image", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"enhancement\", \"transformationTypes\": [\"enhancement\"], \"modification\": \"restoration\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Old photo restoration", "Damaged image repair", "Artifact removal"], "metadata_platforms": ["Replicate", "Remini"], "metadata_example_models": ["GFPGAN", "CodeFormer", "Remini"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
+
{"id": "img-to-img-background-removal", "name": "Background Removal", "input_primary": "image", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"object-editing\"], \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Background removal", "Subject isolation", "Product photography"], "metadata_platforms": ["Remove.bg", "Adobe", "Replicate"], "metadata_example_models": ["U2-Net", "RMBG", "SAM"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
+
{"id": "img-to-img-relighting", "name": "Image Relighting", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"enhancement\"], \"modification\": \"enhancement\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Lighting adjustment", "Portrait relighting", "Scene mood change"], "metadata_platforms": ["Experimental", "Adobe"], "metadata_example_models": ["IC-Light"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
+
{"id": "img-to-img-face-swap", "name": "Face Swap", "input_primary": "image", "input_secondary": ["image"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"object-editing\"], \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Face replacement", "Identity swap", "Portrait editing"], "metadata_platforms": ["Replicate", "FaceSwap"], "metadata_example_models": ["InsightFace", "SimSwap", "Roop"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
+
{"id": "img-to-img-depth-map", "name": "Depth Map Generation", "input_primary": "image", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"editing\"]}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Depth estimation", "3D reconstruction prep", "Spatial understanding"], "metadata_platforms": ["Replicate", "HuggingFace"], "metadata_example_models": ["Depth-Anything", "MiDaS", "ZoeDepth"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
+
{"id": "img-to-img-segmentation", "name": "Image Segmentation", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"object-editing\"], \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Object isolation", "Semantic segmentation", "Masking"], "metadata_platforms": ["Replicate", "Meta"], "metadata_example_models": ["Segment Anything (SAM)", "Semantic Segment Anything"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
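All of the per-modality files under data/ share the same schema, so they can be folded into a single index keyed by output modality and operation type. A small sketch, assuming the data/taxonomy_*.jsonl layout listed at the top of this commit (the grouping script itself is illustrative):

```python
import glob
import json
from collections import defaultdict

# Group every record by (output_modality, operation_type) and count mature entries.
index = defaultdict(list)
for path in sorted(glob.glob("data/taxonomy_*.jsonl")):
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            if line.strip():
                record = json.loads(line)
                index[(record["output_modality"], record["operation_type"])].append(record)

for (modality, operation), records in sorted(index.items()):
    mature = sum(r["metadata_maturity_level"] == "mature" for r in records)
    print(f"{modality}/{operation}: {len(records)} records ({mature} mature)")
```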
data/taxonomy_text_creation.jsonl ADDED
@@ -0,0 +1,9 @@
+
{"id": "audio-to-text-transcription", "name": "Audio to Text (Transcription)", "input_primary": "audio", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Speech transcription", "Meeting notes", "Subtitling", "Accessibility"], "metadata_platforms": ["OpenAI", "AssemblyAI", "Deepgram", "Google Cloud"], "metadata_example_models": ["Whisper", "AssemblyAI", "Deepgram Nova"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
+
{"id": "img-to-text-captioning", "name": "Image to Text (Captioning)", "input_primary": "image", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Image description", "Alt text generation", "Scene understanding", "Accessibility"], "metadata_platforms": ["OpenAI", "Google Cloud", "HuggingFace"], "metadata_example_models": ["GPT-4 Vision", "BLIP", "LLaVA", "Gemini Vision"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
+
{"id": "img-to-text-ocr", "name": "Image to Text (OCR)", "input_primary": "image", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Text extraction", "Document digitization", "Receipt scanning", "Data entry automation"], "metadata_platforms": ["Google Cloud", "AWS", "Azure", "Tesseract"], "metadata_example_models": ["Google Cloud Vision", "AWS Textract", "Tesseract", "EasyOCR"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
+
{"id": "vid-to-text-transcription", "name": "Video to Text (Transcription)", "input_primary": "video", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Video subtitling", "Content indexing", "Meeting transcription", "Accessibility"], "metadata_platforms": ["OpenAI", "AssemblyAI", "YouTube", "Rev"], "metadata_example_models": ["Whisper", "AssemblyAI", "Google Speech-to-Text"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
+
{"id": "vid-to-text-captioning", "name": "Video to Text (Captioning/Description)", "input_primary": "video", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video description", "Content summarization", "Scene understanding", "Accessibility"], "metadata_platforms": ["OpenAI", "Google", "Experimental"], "metadata_example_models": ["GPT-4 Vision", "Gemini Video", "Video-LLaMA"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
+
{"id": "multimodal-to-text-vqa", "name": "Visual Question Answering", "input_primary": "image", "input_secondary": ["text"], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"guidanceType\": \"multimodal\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Image Q&A", "Visual information retrieval", "Educational applications", "Accessibility"], "metadata_platforms": ["OpenAI", "Anthropic", "Google"], "metadata_example_models": ["GPT-4 Vision", "Claude", "Gemini Vision"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
+
{"id": "3d-to-text-description", "name": "3D Model to Text (Description)", "input_primary": "3d-model", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["3D model description", "Asset cataloging", "Model understanding"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
+
{"id": "music-to-text-transcription", "name": "Music to Text (Transcription)", "input_primary": "music", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Music notation", "Sheet music generation", "MIDI to score"], "metadata_platforms": ["Experimental", "AnthemScore"], "metadata_example_models": ["AnthemScore", "Audio to MIDI"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
+
{"id": "audio-to-text-diarization", "name": "Audio to Text (Speaker Diarization)", "input_primary": "audio", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Multi-speaker transcription", "Meeting notes with speakers", "Interview transcription"], "metadata_platforms": ["AssemblyAI", "Deepgram", "Pyannote"], "metadata_example_models": ["Pyannote", "AssemblyAI", "Whisper + Diarization"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
data/taxonomy_text_editing.jsonl ADDED
@@ -0,0 +1,4 @@
+
{"id": "text-to-text-translation", "name": "Text Translation", "input_primary": "text", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Language translation", "Localization", "Multilingual content"], "metadata_platforms": ["Google Translate", "DeepL", "OpenAI"], "metadata_example_models": ["Google Translate", "DeepL", "GPT-4", "NLLB"], "relationships": "{}", "output_modality": "text", "operation_type": "editing"}
+
{"id": "text-to-text-summarization", "name": "Text Summarization", "input_primary": "text", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Document summarization", "Content condensation", "Abstract generation"], "metadata_platforms": ["OpenAI", "Anthropic", "HuggingFace"], "metadata_example_models": ["GPT-4", "Claude", "BART"], "relationships": "{}", "output_modality": "text", "operation_type": "editing"}
+
{"id": "text-to-text-paraphrasing", "name": "Text Paraphrasing/Rewriting", "input_primary": "text", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Content rewriting", "Style adjustment", "Tone modification"], "metadata_platforms": ["OpenAI", "Anthropic", "QuillBot"], "metadata_example_models": ["GPT-4", "Claude", "QuillBot"], "relationships": "{}", "output_modality": "text", "operation_type": "editing"}
+
{"id": "text-to-text-grammar-correction", "name": "Grammar & Spelling Correction", "input_primary": "text", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"enhancement\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Proofreading", "Error correction", "Writing improvement"], "metadata_platforms": ["Grammarly", "LanguageTool", "OpenAI"], "metadata_example_models": ["Grammarly", "LanguageTool", "GPT-4"], "relationships": "{}", "output_modality": "text", "operation_type": "editing"}
data/taxonomy_video_creation.jsonl CHANGED
@@ -9,3 +9,8 @@
{"id": "multimodal-img-audio-to-vid", "name": "Image + Audio to Video", "input_primary": "image", "input_secondary": ["audio"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"reference-based\", \"motionType\": \"audio-driven\", \"lipSync\": false}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Audio-driven animation", "Dance video generation", "Music-driven motion"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "multimodal-text-img-to-vid", "name": "Text + Image to Video", "input_primary": "text", "input_secondary": ["image"], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"guidanceType\": \"text-and-visual\", \"motionType\": \"guided\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Guided video generation", "Controlled animation", "Reference-based video creation"], "metadata_platforms": ["Replicate", "FAL AI"], "metadata_example_models": ["AnimateDiff with ControlNet"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "3d-to-vid", "name": "3D Model to Video", "input_primary": "3d-model", "input_secondary": [], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"rendering\", \"renderType\": \"3d-rendering\", \"motionType\": \"camera-path\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["3D visualization", "Product rendering", "Architectural visualization"], "metadata_platforms": ["Blender", "Unreal Engine", "Unity"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
+
{"id": "music-to-vid", "name": "Music to Video", "input_primary": "music", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioVisualization\": true, \"motionType\": \"audio-reactive\", \"audioVideoSync\": true}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Music video generation", "Lyric videos", "Album visualizers"], "metadata_platforms": ["Replicate", "Experimental"], "metadata_example_models": ["Stable Diffusion Video"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
+
{"id": "text-to-vid-music", "name": "Text to Video with Music", "input_primary": "text", "input_secondary": [], "output_primary": "video", "output_audio": true, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"synthesized\", \"audioPrompting\": \"text-based\", \"audioVideoSync\": true, \"motionType\": \"general\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Promotional videos", "Social media content", "Advertisement creation"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
+
{"id": "img-to-vid-music", "name": "Image to Video with Music", "input_primary": "image", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"synthesized\", \"audioPrompting\": \"text-based\", \"motionType\": \"general\", \"audioCharacteristics\": [\"melodic\", \"rhythmic\"]}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Photo slideshow creation", "Social media posts", "Memory videos"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
+
{"id": "vid-to-vid-upscale", "name": "Video Upscaling", "input_primary": "video", "input_secondary": [], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"enhancement\", \"audioHandling\": \"passthrough\", \"preserveAudio\": true}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Resolution enhancement", "Quality improvement", "Restoration"], "metadata_platforms": ["Topaz", "Replicate"], "metadata_example_models": ["Topaz Video AI", "Real-ESRGAN Video"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
+
{"id": "multimodal-vid-text-to-vid", "name": "Video + Text to Video", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"guidanceType\": \"text-and-visual\", \"audioHandling\": \"passthrough\", \"preserveAudio\": true, \"motionType\": \"guided\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video-guided generation", "Motion transfer", "Style-guided video"], "metadata_platforms": ["Replicate", "RunwayML"], "metadata_example_models": ["Gen-2", "Pika"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
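The video rows above carry audio-track flags: output_audio, output_audio_type, and audioHandling / preserveAudio inside characteristics. A rough consistency check over a video file, assuming the convention visible in these rows that an audio-bearing video names its audio type and a silent one leaves it empty (the check is illustrative only):

```python
import json

def audio_flag_issues(path):
    """Return (line, id, problem) tuples for suspicious audio flags."""
    issues = []
    with open(path, encoding="utf-8") as handle:
        for number, line in enumerate(handle, start=1):
            if not line.strip():
                continue
            record = json.loads(line)
            if record["output_audio"] and not record["output_audio_type"]:
                issues.append((number, record["id"], "output_audio true but no output_audio_type"))
            if not record["output_audio"] and record["output_audio_type"]:
                issues.append((number, record["id"], "output_audio_type set but output_audio false"))
    return issues

print(audio_flag_issues("data/taxonomy_video_creation.jsonl"))
```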
data/taxonomy_video_editing.jsonl CHANGED
@@ -1,2 +1,9 @@
{"id": "vid-to-vid-no-audio", "name": "Video to Video (No Audio)", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\", \"motion-modification\", \"object-editing\"], \"preserveAudio\": false}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video style transfer", "Video editing", "Motion manipulation"], "metadata_platforms": ["Replicate", "RunwayML"], "metadata_example_models": ["Gen-2", "Video ControlNet"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 2 |
{"id": "vid-to-vid-preserve-audio", "name": "Video to Video (Preserve Audio)", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\", \"motion-modification\", \"object-editing\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video style transfer with audio", "Content transformation maintaining soundtrack"], "metadata_platforms": ["Replicate", "RunwayML"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 3 |
+
{"id": "vid-to-vid-inpainting", "name": "Video Inpainting", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"inpainting\", \"transformationTypes\": [\"object-editing\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"selective-editing\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Object removal", "Background replacement", "Video cleanup"], "metadata_platforms": ["Replicate", "RunwayML", "Adobe"], "metadata_example_models": ["ProPainter", "E2FGVI"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 4 |
+
{"id": "vid-to-vid-enhancement", "name": "Video Enhancement", "input_primary": "video", "input_secondary": [], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"enhancement\", \"transformationTypes\": [\"enhancement\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Quality improvement", "Noise reduction", "Color grading", "Stabilization"], "metadata_platforms": ["Topaz", "Adobe", "Replicate"], "metadata_example_models": ["Topaz Video AI", "DAIN", "Real-ESRGAN"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 5 |
+
{"id": "vid-to-vid-interpolation", "name": "Video Frame Interpolation", "input_primary": "video", "input_secondary": [], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"enhancement\", \"transformationTypes\": [\"motion-modification\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Frame rate increase", "Slow motion creation", "Smooth motion"], "metadata_platforms": ["Topaz", "Replicate"], "metadata_example_models": ["RIFE", "DAIN", "Flowframes"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 6 |
+
{"id": "vid-to-vid-colorization", "name": "Video Colorization", "input_primary": "video", "input_secondary": [], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"enhancement\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Black and white restoration", "Historical footage colorization", "Archival restoration"], "metadata_platforms": ["Replicate", "DeOldify"], "metadata_example_models": ["DeOldify", "Video Colorization"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 7 |
+
{"id": "vid-to-vid-deepfake", "name": "Video Face Swap", "input_primary": "video", "input_secondary": ["image"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"object-editing\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Face replacement", "Character substitution", "Visual effects"], "metadata_platforms": ["Replicate", "DeepFaceLab"], "metadata_example_models": ["DeepFaceLab", "Roop", "FaceSwap"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 8 |
+
{"id": "vid-to-vid-relighting", "name": "Video Relighting", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"enhancement\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Lighting adjustment", "Time of day change", "Mood alteration"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 9 |
+
{"id": "vid-to-vid-segmentation", "name": "Video Segmentation", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"object-editing\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"selective-editing\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Background removal", "Object isolation", "Green screen replacement"], "metadata_platforms": ["Replicate", "Runway", "Unscreen"], "metadata_example_models": ["Segment Anything Video", "XMem", "Cutout.pro"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
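The data/train.jsonl diff that follows appears to carry the same records as the per-modality files, with output_modality and operation_type acting as the keys that route each record back to its split. A short grouping sketch under that assumption (the field names come from the records themselves; the local path is illustrative):

```python
import json
from collections import defaultdict
from pathlib import Path

# Group the combined file into (output_modality, operation_type) buckets,
# which seems to mirror how the per-modality taxonomy_*.jsonl files are split.
buckets = defaultdict(list)
for line in Path("data/train.jsonl").read_text(encoding="utf-8").splitlines():
    if not line.strip():
        continue
    rec = json.loads(line)
    buckets[(rec["output_modality"], rec["operation_type"])].append(rec["id"])

for (modality, op), ids in sorted(buckets.items()):
    print(f"{modality}/{op}: {len(ids)} records, e.g. {ids[0]}")
```

If the assumption holds, each bucket should line up with one of the taxonomy_*.jsonl files added in this commit.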
data/train.jsonl
CHANGED
|
@@ -9,14 +9,77 @@
| 9 |
{"id": "multimodal-img-audio-to-vid", "name": "Image + Audio to Video", "input_primary": "image", "input_secondary": ["audio"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"reference-based\", \"motionType\": \"audio-driven\", \"lipSync\": false}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Audio-driven animation", "Dance video generation", "Music-driven motion"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
|
| 10 |
{"id": "multimodal-text-img-to-vid", "name": "Text + Image to Video", "input_primary": "text", "input_secondary": ["image"], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"guidanceType\": \"text-and-visual\", \"motionType\": \"guided\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Guided video generation", "Controlled animation", "Reference-based video creation"], "metadata_platforms": ["Replicate", "FAL AI"], "metadata_example_models": ["AnimateDiff with ControlNet"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
|
| 11 |
{"id": "3d-to-vid", "name": "3D Model to Video", "input_primary": "3d-model", "input_secondary": [], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"rendering\", \"renderType\": \"3d-rendering\", \"motionType\": \"camera-path\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["3D visualization", "Product rendering", "Architectural visualization"], "metadata_platforms": ["Blender", "Unreal Engine", "Unity"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
|
| 12 |
+
{"id": "music-to-vid", "name": "Music to Video", "input_primary": "music", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioVisualization\": true, \"motionType\": \"audio-reactive\", \"audioVideoSync\": true}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Music video generation", "Lyric videos", "Album visualizers"], "metadata_platforms": ["Replicate", "Experimental"], "metadata_example_models": ["Stable Diffusion Video"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
|
| 13 |
+
{"id": "text-to-vid-music", "name": "Text to Video with Music", "input_primary": "text", "input_secondary": [], "output_primary": "video", "output_audio": true, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"synthesized\", \"audioPrompting\": \"text-based\", \"audioVideoSync\": true, \"motionType\": \"general\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Promotional videos", "Social media content", "Advertisement creation"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
|
| 14 |
+
{"id": "img-to-vid-music", "name": "Image to Video with Music", "input_primary": "image", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"synthesized\", \"audioPrompting\": \"text-based\", \"motionType\": \"general\", \"audioCharacteristics\": [\"melodic\", \"rhythmic\"]}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Photo slideshow creation", "Social media posts", "Memory videos"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
|
| 15 |
+
{"id": "vid-to-vid-upscale", "name": "Video Upscaling", "input_primary": "video", "input_secondary": [], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"enhancement\", \"audioHandling\": \"passthrough\", \"preserveAudio\": true}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Resolution enhancement", "Quality improvement", "Restoration"], "metadata_platforms": ["Topaz", "Replicate"], "metadata_example_models": ["Topaz Video AI", "Real-ESRGAN Video"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
|
| 16 |
+
{"id": "multimodal-vid-text-to-vid", "name": "Video + Text to Video", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"guidanceType\": \"text-and-visual\", \"audioHandling\": \"passthrough\", \"preserveAudio\": true, \"motionType\": \"guided\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video-guided generation", "Motion transfer", "Style-guided video"], "metadata_platforms": ["Replicate", "RunwayML"], "metadata_example_models": ["Gen-2", "Pika"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
|
| 17 |
{"id": "vid-to-vid-no-audio", "name": "Video to Video (No Audio)", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\", \"motion-modification\", \"object-editing\"], \"preserveAudio\": false}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video style transfer", "Video editing", "Motion manipulation"], "metadata_platforms": ["Replicate", "RunwayML"], "metadata_example_models": ["Gen-2", "Video ControlNet"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 18 |
{"id": "vid-to-vid-preserve-audio", "name": "Video to Video (Preserve Audio)", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\", \"motion-modification\", \"object-editing\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video style transfer with audio", "Content transformation maintaining soundtrack"], "metadata_platforms": ["Replicate", "RunwayML"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 19 |
+
{"id": "vid-to-vid-inpainting", "name": "Video Inpainting", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"inpainting\", \"transformationTypes\": [\"object-editing\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"selective-editing\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Object removal", "Background replacement", "Video cleanup"], "metadata_platforms": ["Replicate", "RunwayML", "Adobe"], "metadata_example_models": ["ProPainter", "E2FGVI"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 20 |
+
{"id": "vid-to-vid-enhancement", "name": "Video Enhancement", "input_primary": "video", "input_secondary": [], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"enhancement\", \"transformationTypes\": [\"enhancement\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Quality improvement", "Noise reduction", "Color grading", "Stabilization"], "metadata_platforms": ["Topaz", "Adobe", "Replicate"], "metadata_example_models": ["Topaz Video AI", "DAIN", "Real-ESRGAN"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 21 |
+
{"id": "vid-to-vid-interpolation", "name": "Video Frame Interpolation", "input_primary": "video", "input_secondary": [], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"enhancement\", \"transformationTypes\": [\"motion-modification\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Frame rate increase", "Slow motion creation", "Smooth motion"], "metadata_platforms": ["Topaz", "Replicate"], "metadata_example_models": ["RIFE", "DAIN", "Flowframes"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 22 |
+
{"id": "vid-to-vid-colorization", "name": "Video Colorization", "input_primary": "video", "input_secondary": [], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"enhancement\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Black and white restoration", "Historical footage colorization", "Archival restoration"], "metadata_platforms": ["Replicate", "DeOldify"], "metadata_example_models": ["DeOldify", "Video Colorization"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 23 |
+
{"id": "vid-to-vid-deepfake", "name": "Video Face Swap", "input_primary": "video", "input_secondary": ["image"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"object-editing\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Face replacement", "Character substitution", "Visual effects"], "metadata_platforms": ["Replicate", "DeepFaceLab"], "metadata_example_models": ["DeepFaceLab", "Roop", "FaceSwap"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 24 |
+
{"id": "vid-to-vid-relighting", "name": "Video Relighting", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"enhancement\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Lighting adjustment", "Time of day change", "Mood alteration"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 25 |
+
{"id": "vid-to-vid-segmentation", "name": "Video Segmentation", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"object-editing\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\", \"modification\": \"selective-editing\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Background removal", "Object isolation", "Green screen replacement"], "metadata_platforms": ["Replicate", "Runway", "Unscreen"], "metadata_example_models": ["Segment Anything Video", "XMem", "Cutout.pro"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
|
| 26 |
{"id": "text-to-audio", "name": "Text to Audio", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"general\", \"audioCategories\": [\"speech\", \"sound-effects\", \"music\", \"ambient\"]}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Sound effect generation", "Voiceover creation", "Audio asset production"], "metadata_platforms": ["Replicate", "ElevenLabs", "AudioCraft"], "metadata_example_models": ["AudioGen", "MusicGen"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
|
| 27 |
{"id": "text-to-speech", "name": "Text to Speech", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "speech", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"speech\", \"voiceCloning\": false}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Narration", "Accessibility", "Voice assistants"], "metadata_platforms": ["ElevenLabs", "Google Cloud", "Azure", "AWS"], "metadata_example_models": ["ElevenLabs", "Google WaveNet", "Azure Neural TTS"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
|
| 28 |
{"id": "text-to-music", "name": "Text to Music", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"music\", \"melodic\": true}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Background music generation", "Musical composition", "Soundtrack creation"], "metadata_platforms": ["Replicate", "Stability AI"], "metadata_example_models": ["MusicGen", "Stable Audio"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
|
| 29 |
+
{"id": "text-to-speech-voice-clone", "name": "Text to Speech (Voice Cloning)", "input_primary": "text", "input_secondary": ["audio"], "output_primary": "audio", "output_audio": false, "output_audio_type": "speech", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"speech\", \"voiceCloning\": true}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Custom voice synthesis", "Personalized narration", "Voice preservation"], "metadata_platforms": ["ElevenLabs", "Replicate", "PlayHT"], "metadata_example_models": ["ElevenLabs", "XTTS", "Bark"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
|
| 30 |
+
{"id": "text-to-sound-effects", "name": "Text to Sound Effects", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "sound-effects", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"sound-effects\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["SFX generation", "Foley creation", "Game audio"], "metadata_platforms": ["Replicate", "AudioCraft"], "metadata_example_models": ["AudioGen", "AudioLDM"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
|
| 31 |
+
{"id": "img-to-audio", "name": "Image to Audio", "input_primary": "image", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"general\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Image sonification", "Scene audio generation", "Accessibility"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
|
| 32 |
+
{"id": "music-to-music-style", "name": "Music Style Transfer", "input_primary": "music", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"transformation\", \"audioType\": \"music\", \"melodic\": true}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Genre transformation", "Instrument swap", "Musical reimagining"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
|
| 33 |
+
{"id": "vid-to-audio-extraction", "name": "Video to Audio Extraction", "input_primary": "video", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"transformation\", \"audioType\": \"general\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Audio extraction", "Soundtrack isolation", "Voice extraction"], "metadata_platforms": ["FFmpeg", "Standard tools"], "metadata_example_models": [], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
|
| 34 |
+
{"id": "humming-to-music", "name": "Humming to Music", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"music\", \"melodic\": true}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Melody to full track", "Musical idea development", "Composition assistance"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
|
| 35 |
+
{"id": "lyrics-to-music", "name": "Lyrics to Music", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"music\", \"melodic\": true, \"voiceCloning\": false}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Song generation", "Music composition", "Vocal track creation"], "metadata_platforms": ["Suno", "Udio", "Replicate"], "metadata_example_models": ["Suno", "Udio", "MusicGen"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
|
| 36 |
{"id": "audio-to-audio-inpainting", "name": "Audio to Audio (Inpainting)", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"inpainting\", \"modification\": \"selective-editing\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Audio editing", "Sound design", "Audio restoration"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
|
| 37 |
{"id": "music-to-music-inpainting", "name": "Music to Music (Inpainting)", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"inpainting\", \"modification\": \"selective-editing\", \"melodic\": true, \"audioSubtype\": \"music\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Music editing", "Compositional modifications", "Arrangement changes"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{\"parent\": \"audio-to-audio-inpainting\", \"note\": \"Music inpainting is a specialized subset of audio inpainting\"}", "output_modality": "audio", "operation_type": "editing"}
|
| 38 |
+
{"id": "audio-to-audio-enhancement", "name": "Audio Enhancement", "input_primary": "audio", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"enhancement\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Noise reduction", "Quality improvement", "Audio cleanup"], "metadata_platforms": ["Adobe", "iZotope", "Replicate"], "metadata_example_models": ["Adobe Podcast", "Krisp", "Denoiser"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
|
| 39 |
+
{"id": "audio-to-audio-restoration", "name": "Audio Restoration", "input_primary": "audio", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"enhancement\", \"modification\": \"restoration\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Historical recording restoration", "Audio artifact removal", "Damaged audio repair"], "metadata_platforms": ["iZotope", "Adobe", "Accusonus"], "metadata_example_models": ["iZotope RX", "Adobe Audition"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
|
| 40 |
+
{"id": "audio-to-audio-voice-conversion", "name": "Voice Conversion", "input_primary": "audio", "input_secondary": ["audio"], "output_primary": "audio", "output_audio": false, "output_audio_type": "speech", "characteristics": "{\"processType\": \"transformation\", \"audioType\": \"speech\", \"voiceCloning\": true, \"modification\": \"transformation\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Voice swapping", "Dubbing", "Voice translation"], "metadata_platforms": ["ElevenLabs", "Respeecher", "Replicate"], "metadata_example_models": ["RVC", "So-VITS-SVC", "Respeecher"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
|
| 41 |
+
{"id": "music-to-music-stem-separation", "name": "Music Stem Separation", "input_primary": "music", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"transformation\", \"audioType\": \"music\", \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Vocal isolation", "Instrument extraction", "Remixing", "Karaoke creation"], "metadata_platforms": ["Spleeter", "Demucs", "Replicate"], "metadata_example_models": ["Demucs", "Spleeter", "Ultimate Vocal Remover"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
|
| 42 |
+
{"id": "audio-to-audio-speed-change", "name": "Audio Speed/Pitch Modification", "input_primary": "audio", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Tempo adjustment", "Pitch shifting", "Time stretching"], "metadata_platforms": ["Standard tools", "Adobe"], "metadata_example_models": ["Rubber Band", "Paulstretch"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
|
| 43 |
+
{"id": "music-to-music-mastering", "name": "AI Music Mastering", "input_primary": "music", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"enhancement\", \"audioType\": \"music\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Automated mastering", "Mix enhancement", "Final polish"], "metadata_platforms": ["LANDR", "iZotope", "eMastered"], "metadata_example_models": ["LANDR", "iZotope Ozone"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
|
| 44 |
+
{"id": "audio-to-audio-spatial", "name": "Spatial Audio Conversion", "input_primary": "audio", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Stereo to spatial", "Binaural conversion", "3D audio creation"], "metadata_platforms": ["Dolby", "Sony", "Experimental"], "metadata_example_models": ["Dolby Atmos", "Sony 360 Reality Audio"], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
|
| 45 |
{"id": "text-to-img", "name": "Text to Image", "input_primary": "text", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"synthesis\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Concept art generation", "Product mockups", "Marketing assets"], "metadata_platforms": ["Replicate", "Stability AI", "Midjourney", "DALL-E"], "metadata_example_models": ["Stable Diffusion", "DALL-E 3", "Midjourney"], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
|
| 46 |
+
{"id": "text-img-to-img", "name": "Text + Image to Image", "input_primary": "text", "input_secondary": ["image"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"synthesis\", \"guidanceType\": \"text-and-visual\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Image-guided generation", "Style reference", "Composition guidance"], "metadata_platforms": ["Replicate", "Stability AI", "Midjourney"], "metadata_example_models": ["Stable Diffusion with ControlNet", "DALL-E 3", "Midjourney"], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
|
| 47 |
+
{"id": "img-to-img-upscale", "name": "Image Upscaling", "input_primary": "image", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"enhancement\", \"generationType\": \"synthesis\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Resolution enhancement", "Quality improvement", "Detail enhancement"], "metadata_platforms": ["Topaz", "Replicate", "Stability AI"], "metadata_example_models": ["Real-ESRGAN", "Topaz Gigapixel", "SUPIR"], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
|
| 48 |
+
{"id": "vid-to-img-frame-extraction", "name": "Video to Image (Frame Extraction)", "input_primary": "video", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"generationType\": \"synthesis\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Frame extraction", "Thumbnail generation", "Video analysis"], "metadata_platforms": ["FFmpeg", "Standard tools"], "metadata_example_models": [], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
|
| 49 |
+
{"id": "3d-to-img-render", "name": "3D to Image (Rendering)", "input_primary": "3d-model", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"rendering\", \"renderType\": \"3d-rendering\", \"generationType\": \"synthesis\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Product rendering", "3D visualization", "Architectural rendering"], "metadata_platforms": ["Blender", "Unreal Engine", "Unity"], "metadata_example_models": [], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
|
| 50 |
+
{"id": "audio-to-img-visualization", "name": "Audio to Image (Visualization)", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"audioVisualization\": true, \"generationType\": \"synthesis\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Album art generation", "Sound visualization", "Music imagery"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
|
| 51 |
+
{"id": "sketch-to-img", "name": "Sketch to Image", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"synthesis\", \"guidanceType\": \"text-and-visual\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Sketch refinement", "Concept development", "Design exploration"], "metadata_platforms": ["Replicate", "Stability AI"], "metadata_example_models": ["ControlNet Scribble", "Pix2Pix"], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
|
| 52 |
{"id": "img-to-img", "name": "Image to Image", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\", \"enhancement\", \"editing\", \"inpainting\"]}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Image editing", "Style transfer", "Image enhancement", "Object removal/addition"], "metadata_platforms": ["Replicate", "Stability AI", "Midjourney"], "metadata_example_models": ["Stable Diffusion img2img", "ControlNet"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
|
| 53 |
+
{"id": "img-to-img-inpainting", "name": "Image Inpainting", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"inpainting\", \"transformationTypes\": [\"inpainting\"], \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Object removal", "Background extension", "Image repair", "Content-aware fill"], "metadata_platforms": ["Replicate", "Adobe", "Stability AI"], "metadata_example_models": ["Stable Diffusion Inpainting", "LaMa", "Adobe Firefly"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
|
| 54 |
+
{"id": "img-to-img-outpainting", "name": "Image Outpainting", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"transformationTypes\": [\"editing\"], \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Canvas extension", "Image expansion", "Background generation"], "metadata_platforms": ["Replicate", "DALL-E", "Stability AI"], "metadata_example_models": ["Stable Diffusion Outpainting", "DALL-E Outpainting"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
|
| 55 |
+
{"id": "img-to-img-style-transfer", "name": "Image Style Transfer", "input_primary": "image", "input_secondary": ["image", "text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\"], \"modification\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Artistic style application", "Photo stylization", "Creative filters"], "metadata_platforms": ["Replicate", "Stability AI"], "metadata_example_models": ["StyleGAN", "Neural Style Transfer", "InstantStyle"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
|
| 56 |
+
{"id": "img-to-img-colorization", "name": "Image Colorization", "input_primary": "image", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"enhancement\"], \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Black and white colorization", "Historical photo restoration", "Photo enhancement"], "metadata_platforms": ["Replicate", "DeOldify"], "metadata_example_models": ["DeOldify", "Colorful Image Colorization"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
|
| 57 |
+
{"id": "img-to-img-enhancement", "name": "Image Enhancement", "input_primary": "image", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"enhancement\", \"transformationTypes\": [\"enhancement\"], \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Quality improvement", "Noise reduction", "Sharpening", "Dynamic range enhancement"], "metadata_platforms": ["Topaz", "Adobe", "Replicate"], "metadata_example_models": ["Topaz Photo AI", "Adobe Enhance"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
|
| 58 |
+
{"id": "img-to-img-restoration", "name": "Image Restoration", "input_primary": "image", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"enhancement\", \"transformationTypes\": [\"enhancement\"], \"modification\": \"restoration\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Old photo restoration", "Damaged image repair", "Artifact removal"], "metadata_platforms": ["Replicate", "Remini"], "metadata_example_models": ["GFPGAN", "CodeFormer", "Remini"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
|
| 59 |
+
{"id": "img-to-img-background-removal", "name": "Background Removal", "input_primary": "image", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"object-editing\"], \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Background removal", "Subject isolation", "Product photography"], "metadata_platforms": ["Remove.bg", "Adobe", "Replicate"], "metadata_example_models": ["U2-Net", "RMBG", "SAM"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
|
| 60 |
+
{"id": "img-to-img-relighting", "name": "Image Relighting", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"enhancement\"], \"modification\": \"enhancement\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Lighting adjustment", "Portrait relighting", "Scene mood change"], "metadata_platforms": ["Experimental", "Adobe"], "metadata_example_models": ["IC-Light"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
|
| 61 |
+
{"id": "img-to-img-face-swap", "name": "Face Swap", "input_primary": "image", "input_secondary": ["image"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"object-editing\"], \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Face replacement", "Identity swap", "Portrait editing"], "metadata_platforms": ["Replicate", "FaceSwap"], "metadata_example_models": ["InsightFace", "SimSwap", "Roop"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
|
| 62 |
+
{"id": "img-to-img-depth-map", "name": "Depth Map Generation", "input_primary": "image", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"editing\"]}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Depth estimation", "3D reconstruction prep", "Spatial understanding"], "metadata_platforms": ["Replicate", "HuggingFace"], "metadata_example_models": ["Depth-Anything", "MiDaS", "ZoeDepth"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
|
| 63 |
+
{"id": "img-to-img-segmentation", "name": "Image Segmentation", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"object-editing\"], \"modification\": \"selective-editing\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Object isolation", "Semantic segmentation", "Masking"], "metadata_platforms": ["Replicate", "Meta"], "metadata_example_models": ["Segment Anything (SAM)", "Semantic Segment Anything"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
|
| 64 |
+
{"id": "audio-to-text-transcription", "name": "Audio to Text (Transcription)", "input_primary": "audio", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Speech transcription", "Meeting notes", "Subtitling", "Accessibility"], "metadata_platforms": ["OpenAI", "AssemblyAI", "Deepgram", "Google Cloud"], "metadata_example_models": ["Whisper", "AssemblyAI", "Deepgram Nova"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
|
| 65 |
+
{"id": "img-to-text-captioning", "name": "Image to Text (Captioning)", "input_primary": "image", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Image description", "Alt text generation", "Scene understanding", "Accessibility"], "metadata_platforms": ["OpenAI", "Google Cloud", "HuggingFace"], "metadata_example_models": ["GPT-4 Vision", "BLIP", "LLaVA", "Gemini Vision"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
|
| 66 |
+
{"id": "img-to-text-ocr", "name": "Image to Text (OCR)", "input_primary": "image", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Text extraction", "Document digitization", "Receipt scanning", "Data entry automation"], "metadata_platforms": ["Google Cloud", "AWS", "Azure", "Tesseract"], "metadata_example_models": ["Google Cloud Vision", "AWS Textract", "Tesseract", "EasyOCR"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
|
| 67 |
+
{"id": "vid-to-text-transcription", "name": "Video to Text (Transcription)", "input_primary": "video", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Video subtitling", "Content indexing", "Meeting transcription", "Accessibility"], "metadata_platforms": ["OpenAI", "AssemblyAI", "YouTube", "Rev"], "metadata_example_models": ["Whisper", "AssemblyAI", "Google Speech-to-Text"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
|
| 68 |
+
{"id": "vid-to-text-captioning", "name": "Video to Text (Captioning/Description)", "input_primary": "video", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video description", "Content summarization", "Scene understanding", "Accessibility"], "metadata_platforms": ["OpenAI", "Google", "Experimental"], "metadata_example_models": ["GPT-4 Vision", "Gemini Video", "Video-LLaMA"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
|
| 69 |
+
{"id": "multimodal-to-text-vqa", "name": "Visual Question Answering", "input_primary": "image", "input_secondary": ["text"], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"guidanceType\": \"multimodal\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Image Q&A", "Visual information retrieval", "Educational applications", "Accessibility"], "metadata_platforms": ["OpenAI", "Anthropic", "Google"], "metadata_example_models": ["GPT-4 Vision", "Claude", "Gemini Vision"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
|
| 70 |
+
{"id": "3d-to-text-description", "name": "3D Model to Text (Description)", "input_primary": "3d-model", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["3D model description", "Asset cataloging", "Model understanding"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
|
| 71 |
+
{"id": "music-to-text-transcription", "name": "Music to Text (Transcription)", "input_primary": "music", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Music notation", "Sheet music generation", "MIDI to score"], "metadata_platforms": ["Experimental", "AnthemScore"], "metadata_example_models": ["AnthemScore", "Audio to MIDI"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
|
| 72 |
+
{"id": "audio-to-text-diarization", "name": "Audio to Text (Speaker Diarization)", "input_primary": "audio", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Multi-speaker transcription", "Meeting notes with speakers", "Interview transcription"], "metadata_platforms": ["AssemblyAI", "Deepgram", "Pyannote"], "metadata_example_models": ["Pyannote", "AssemblyAI", "Whisper + Diarization"], "relationships": "{}", "output_modality": "text", "operation_type": "creation"}
|
| 73 |
+
{"id": "text-to-text-translation", "name": "Text Translation", "input_primary": "text", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Language translation", "Localization", "Multilingual content"], "metadata_platforms": ["Google Translate", "DeepL", "OpenAI"], "metadata_example_models": ["Google Translate", "DeepL", "GPT-4", "NLLB"], "relationships": "{}", "output_modality": "text", "operation_type": "editing"}
|
| 74 |
+
{"id": "text-to-text-summarization", "name": "Text Summarization", "input_primary": "text", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Document summarization", "Content condensation", "Abstract generation"], "metadata_platforms": ["OpenAI", "Anthropic", "HuggingFace"], "metadata_example_models": ["GPT-4", "Claude", "BART"], "relationships": "{}", "output_modality": "text", "operation_type": "editing"}
|
| 75 |
+
{"id": "text-to-text-paraphrasing", "name": "Text Paraphrasing/Rewriting", "input_primary": "text", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Content rewriting", "Style adjustment", "Tone modification"], "metadata_platforms": ["OpenAI", "Anthropic", "QuillBot"], "metadata_example_models": ["GPT-4", "Claude", "QuillBot"], "relationships": "{}", "output_modality": "text", "operation_type": "editing"}
|
| 76 |
+
{"id": "text-to-text-grammar-correction", "name": "Grammar & Spelling Correction", "input_primary": "text", "input_secondary": [], "output_primary": "text", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"enhancement\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Proofreading", "Error correction", "Writing improvement"], "metadata_platforms": ["Grammarly", "LanguageTool", "OpenAI"], "metadata_example_models": ["Grammarly", "LanguageTool", "GPT-4"], "relationships": "{}", "output_modality": "text", "operation_type": "editing"}
|
| 77 |
{"id": "text-to-3d", "name": "Text to 3D Model", "input_primary": "text", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-synthesis\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["3D asset generation", "Rapid prototyping", "Game asset creation"], "metadata_platforms": ["Replicate", "Meshy", "3DFY"], "metadata_example_models": ["Point-E", "Shap-E", "DreamFusion"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
|
| 78 |
{"id": "img-to-3d", "name": "Image to 3D Model", "input_primary": "image", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-reconstruction\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["3D reconstruction", "Object digitization", "Asset creation from photos"], "metadata_platforms": ["Replicate", "Meshy", "Luma AI"], "metadata_example_models": ["Zero-1-to-3", "Wonder3D"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
|
| 79 |
+
{"id": "multimodal-img-to-3d", "name": "Multi-Image to 3D Model", "input_primary": "image", "input_secondary": ["image"], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-reconstruction\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Photogrammetry", "3D scanning", "Object reconstruction"], "metadata_platforms": ["Luma AI", "Polycam", "Reality Capture"], "metadata_example_models": ["NeRF", "Gaussian Splatting", "Photogrammetry"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
|
| 80 |
+
{"id": "vid-to-3d", "name": "Video to 3D Model", "input_primary": "video", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-reconstruction\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video-based reconstruction", "Motion to 3D", "Scene capture"], "metadata_platforms": ["Luma AI", "Polycam"], "metadata_example_models": ["NeRF", "Gaussian Splatting"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
|
| 81 |
+
{"id": "text-img-to-3d", "name": "Text + Image to 3D Model", "input_primary": "text", "input_secondary": ["image"], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-synthesis\", \"guidanceType\": \"text-and-visual\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Guided 3D generation", "Controlled asset creation", "Reference-based modeling"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
|
| 82 |
+
{"id": "3d-to-3d-optimization", "name": "3D Model Optimization", "input_primary": "3d-model", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"enhancement\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Polygon reduction", "LOD generation", "Performance optimization"], "metadata_platforms": ["Blender", "Maya", "Simplygon"], "metadata_example_models": [], "relationships": "{}", "output_modality": "3d-model", "operation_type": "editing"}
|
| 83 |
+
{"id": "3d-to-3d-texturing", "name": "3D Model Texturing", "input_primary": "3d-model", "input_secondary": ["text"], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"enhancement\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Texture generation", "Material application", "PBR material creation"], "metadata_platforms": ["Experimental", "Adobe Substance"], "metadata_example_models": ["TEXTure", "Text2Tex"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "editing"}
|
| 84 |
+
{"id": "3d-to-3d-rigging", "name": "3D Model Rigging", "input_primary": "3d-model", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Auto-rigging", "Skeleton generation", "Animation preparation"], "metadata_platforms": ["Mixamo", "AccuRIG"], "metadata_example_models": ["Mixamo Auto-Rigger"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "editing"}
|
| 85 |
+
{"id": "3d-to-3d-style-transfer", "name": "3D Style Transfer", "input_primary": "3d-model", "input_secondary": ["text", "image"], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"modification\": \"transformation\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Artistic 3D styling", "Model transformation", "Creative effects"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "3d-model", "operation_type": "editing"}
|
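The flat JSONL rows above all share one field layout, with the nested `characteristics` and `relationships` objects serialized as JSON strings. As a minimal sketch of consuming these rows (the `data/train.jsonl` path and the field names are assumed from the lines shown above, not documented elsewhere in this commit), the records can be tallied by output modality and operation type:

```python
import json
from collections import Counter

# Minimal sketch: the path and the field names are assumed from the JSONL rows above.
path = "data/train.jsonl"

with open(path, encoding="utf-8") as f:
    records = [json.loads(line) for line in f if line.strip()]

# Tally records by (output_modality, operation_type), e.g. ("3d-model", "creation").
counts = Counter((r["output_modality"], r["operation_type"]) for r in records)
for (modality, operation), n in sorted(counts.items()):
    print(f"{modality:10} {operation:9} {n:3}")

# The characteristics and relationships columns hold JSON strings, so they need a second parse.
characteristics = [json.loads(r["characteristics"]) for r in records]
```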
taxonomy/3d-generation/creation/modalities.json
CHANGED
|
@@ -55,6 +55,82 @@
|
|
| 55 |
"platforms": ["Replicate", "Meshy", "Luma AI"],
|
| 56 |
"exampleModels": ["Zero-1-to-3", "Wonder3D"]
|
| 57 |
}
|
|
|
|
|
|
|
|
|
| 58 |
}
|
| 59 |
]
|
| 60 |
}
|
|
|
|
| 55 |
"platforms": ["Replicate", "Meshy", "Luma AI"],
|
| 56 |
"exampleModels": ["Zero-1-to-3", "Wonder3D"]
|
| 57 |
}
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"id": "multimodal-img-to-3d",
|
| 61 |
+
"name": "Multi-Image to 3D Model",
|
| 62 |
+
"input": {
|
| 63 |
+
"primary": "image",
|
| 64 |
+
"secondary": ["image"]
|
| 65 |
+
},
|
| 66 |
+
"output": {
|
| 67 |
+
"primary": "3d-model"
|
| 68 |
+
},
|
| 69 |
+
"characteristics": {
|
| 70 |
+
"processType": "synthesis",
|
| 71 |
+
"generationType": "3d-reconstruction"
|
| 72 |
+
},
|
| 73 |
+
"metadata": {
|
| 74 |
+
"maturityLevel": "mature",
|
| 75 |
+
"commonUseCases": [
|
| 76 |
+
"Photogrammetry",
|
| 77 |
+
"3D scanning",
|
| 78 |
+
"Object reconstruction"
|
| 79 |
+
],
|
| 80 |
+
"platforms": ["Luma AI", "Polycam", "Reality Capture"],
|
| 81 |
+
"exampleModels": ["NeRF", "Gaussian Splatting", "Photogrammetry"]
|
| 82 |
+
}
|
| 83 |
+
},
|
| 84 |
+
{
|
| 85 |
+
"id": "vid-to-3d",
|
| 86 |
+
"name": "Video to 3D Model",
|
| 87 |
+
"input": {
|
| 88 |
+
"primary": "video",
|
| 89 |
+
"secondary": []
|
| 90 |
+
},
|
| 91 |
+
"output": {
|
| 92 |
+
"primary": "3d-model"
|
| 93 |
+
},
|
| 94 |
+
"characteristics": {
|
| 95 |
+
"processType": "synthesis",
|
| 96 |
+
"generationType": "3d-reconstruction"
|
| 97 |
+
},
|
| 98 |
+
"metadata": {
|
| 99 |
+
"maturityLevel": "emerging",
|
| 100 |
+
"commonUseCases": [
|
| 101 |
+
"Video-based reconstruction",
|
| 102 |
+
"Motion to 3D",
|
| 103 |
+
"Scene capture"
|
| 104 |
+
],
|
| 105 |
+
"platforms": ["Luma AI", "Polycam"],
|
| 106 |
+
"exampleModels": ["NeRF", "Gaussian Splatting"]
|
| 107 |
+
}
|
| 108 |
+
},
|
| 109 |
+
{
|
| 110 |
+
"id": "text-img-to-3d",
|
| 111 |
+
"name": "Text + Image to 3D Model",
|
| 112 |
+
"input": {
|
| 113 |
+
"primary": "text",
|
| 114 |
+
"secondary": ["image"]
|
| 115 |
+
},
|
| 116 |
+
"output": {
|
| 117 |
+
"primary": "3d-model"
|
| 118 |
+
},
|
| 119 |
+
"characteristics": {
|
| 120 |
+
"processType": "synthesis",
|
| 121 |
+
"generationType": "3d-synthesis",
|
| 122 |
+
"guidanceType": "text-and-visual"
|
| 123 |
+
},
|
| 124 |
+
"metadata": {
|
| 125 |
+
"maturityLevel": "experimental",
|
| 126 |
+
"commonUseCases": [
|
| 127 |
+
"Guided 3D generation",
|
| 128 |
+
"Controlled asset creation",
|
| 129 |
+
"Reference-based modeling"
|
| 130 |
+
],
|
| 131 |
+
"platforms": ["Experimental"],
|
| 132 |
+
"exampleModels": []
|
| 133 |
+
}
|
| 134 |
}
|
| 135 |
]
|
| 136 |
}
|
taxonomy/3d-generation/editing/modalities.json
CHANGED
|
@@ -2,6 +2,107 @@
|
|
| 2 |
"fileType": "multimodal-ai-taxonomy",
|
| 3 |
"outputModality": "3d-model",
|
| 4 |
"operationType": "editing",
|
| 5 |
-
"description": "Modalities for editing and transforming existing 3D model content
|
| 6 |
-
"modalities": [
|
|
|
|
|
|
|
|
|
| 7 |
}
|
|
|
|
| 2 |
"fileType": "multimodal-ai-taxonomy",
|
| 3 |
"outputModality": "3d-model",
|
| 4 |
"operationType": "editing",
|
| 5 |
+
"description": "Modalities for editing and transforming existing 3D model content",
|
| 6 |
+
"modalities": [
|
| 7 |
+
{
|
| 8 |
+
"id": "3d-to-3d-optimization",
|
| 9 |
+
"name": "3D Model Optimization",
|
| 10 |
+
"input": {
|
| 11 |
+
"primary": "3d-model",
|
| 12 |
+
"secondary": []
|
| 13 |
+
},
|
| 14 |
+
"output": {
|
| 15 |
+
"primary": "3d-model"
|
| 16 |
+
},
|
| 17 |
+
"characteristics": {
|
| 18 |
+
"processType": "enhancement",
|
| 19 |
+
"modification": "enhancement"
|
| 20 |
+
},
|
| 21 |
+
"metadata": {
|
| 22 |
+
"maturityLevel": "mature",
|
| 23 |
+
"commonUseCases": [
|
| 24 |
+
"Polygon reduction",
|
| 25 |
+
"LOD generation",
|
| 26 |
+
"Performance optimization"
|
| 27 |
+
],
|
| 28 |
+
"platforms": ["Blender", "Maya", "Simplygon"],
|
| 29 |
+
"exampleModels": []
|
| 30 |
+
}
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"id": "3d-to-3d-texturing",
|
| 34 |
+
"name": "3D Model Texturing",
|
| 35 |
+
"input": {
|
| 36 |
+
"primary": "3d-model",
|
| 37 |
+
"secondary": ["text"]
|
| 38 |
+
},
|
| 39 |
+
"output": {
|
| 40 |
+
"primary": "3d-model"
|
| 41 |
+
},
|
| 42 |
+
"characteristics": {
|
| 43 |
+
"processType": "transformation",
|
| 44 |
+
"modification": "enhancement"
|
| 45 |
+
},
|
| 46 |
+
"metadata": {
|
| 47 |
+
"maturityLevel": "emerging",
|
| 48 |
+
"commonUseCases": [
|
| 49 |
+
"Texture generation",
|
| 50 |
+
"Material application",
|
| 51 |
+
"PBR material creation"
|
| 52 |
+
],
|
| 53 |
+
"platforms": ["Experimental", "Adobe Substance"],
|
| 54 |
+
"exampleModels": ["TEXTure", "Text2Tex"]
|
| 55 |
+
}
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"id": "3d-to-3d-rigging",
|
| 59 |
+
"name": "3D Model Rigging",
|
| 60 |
+
"input": {
|
| 61 |
+
"primary": "3d-model",
|
| 62 |
+
"secondary": []
|
| 63 |
+
},
|
| 64 |
+
"output": {
|
| 65 |
+
"primary": "3d-model"
|
| 66 |
+
},
|
| 67 |
+
"characteristics": {
|
| 68 |
+
"processType": "transformation",
|
| 69 |
+
"modification": "transformation"
|
| 70 |
+
},
|
| 71 |
+
"metadata": {
|
| 72 |
+
"maturityLevel": "emerging",
|
| 73 |
+
"commonUseCases": [
|
| 74 |
+
"Auto-rigging",
|
| 75 |
+
"Skeleton generation",
|
| 76 |
+
"Animation preparation"
|
| 77 |
+
],
|
| 78 |
+
"platforms": ["Mixamo", "AccuRIG"],
|
| 79 |
+
"exampleModels": ["Mixamo Auto-Rigger"]
|
| 80 |
+
}
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"id": "3d-to-3d-style-transfer",
|
| 84 |
+
"name": "3D Style Transfer",
|
| 85 |
+
"input": {
|
| 86 |
+
"primary": "3d-model",
|
| 87 |
+
"secondary": ["text", "image"]
|
| 88 |
+
},
|
| 89 |
+
"output": {
|
| 90 |
+
"primary": "3d-model"
|
| 91 |
+
},
|
| 92 |
+
"characteristics": {
|
| 93 |
+
"processType": "transformation",
|
| 94 |
+
"modification": "transformation"
|
| 95 |
+
},
|
| 96 |
+
"metadata": {
|
| 97 |
+
"maturityLevel": "experimental",
|
| 98 |
+
"commonUseCases": [
|
| 99 |
+
"Artistic 3D styling",
|
| 100 |
+
"Model transformation",
|
| 101 |
+
"Creative effects"
|
| 102 |
+
],
|
| 103 |
+
"platforms": ["Experimental"],
|
| 104 |
+
"exampleModels": []
|
| 105 |
+
}
|
| 106 |
+
}
|
| 107 |
+
]
|
| 108 |
}
|
taxonomy/MODALITY_INDEX.md
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Master Modality Index
|
| 2 |
+
|
| 3 |
+
This document serves as the definitive index of all modalities in the taxonomy, with unique IDs and their locations.
|
| 4 |
+
|
| 5 |
+
## ID Format
|
| 6 |
+
|
| 7 |
+
Modality IDs follow this format: `{input}-to-{output}-{variant}`
|
| 8 |
+
|
| 9 |
+
Examples:
|
| 10 |
+
- `text-to-img` - Text to Image (basic)
|
| 11 |
+
- `img-to-vid-lipsync-audio` - Image to Video with Lip Sync from Audio
|
| 12 |
+
- `audio-to-text-transcription` - Audio to Text (Transcription)
|
| 13 |
+
|
| 14 |
+
## Unique ID Registry
|
| 15 |
+
|
| 16 |
+
### Video Generation (Creation)
|
| 17 |
+
|
| 18 |
+
| ID | Name | Input | Output | File Location |
|
| 19 |
+
|----|------|-------|--------|---------------|
|
| 20 |
+
| `img-to-vid-no-audio` | Image to Video (No Audio) | image | video (no audio) | video-generation/creation/modalities.json |
|
| 21 |
+
| `img-to-vid-ambient-audio` | Image to Video (Ambient Audio) | image + text | video (ambient audio) | video-generation/creation/modalities.json |
|
| 22 |
+
| `img-to-vid-lipsync-text` | Image to Video (Lip Sync from Text) | image + text | video (speech) | video-generation/creation/modalities.json |
|
| 23 |
+
| `img-to-vid-lipsync-audio` | Image to Video (Lip Sync from Audio) | image + audio | video (speech) | video-generation/creation/modalities.json |
|
| 24 |
+
| `img-to-vid-lipsync-lora` | Image to Video (Lip Sync with LoRA Character) | image + text + lora | video (speech) | video-generation/creation/modalities.json |
|
| 25 |
+
| `text-to-vid-no-audio` | Text to Video (No Audio) | text | video (no audio) | video-generation/creation/modalities.json |
|
| 26 |
+
| `text-to-vid-with-audio` | Text to Video (With Audio) | text | video (synchronized audio) | video-generation/creation/modalities.json |
|
| 27 |
+
| `audio-to-vid` | Audio to Video | audio + text | video (original audio) | video-generation/creation/modalities.json |
|
| 28 |
+
| `multimodal-img-audio-to-vid` | Image + Audio to Video | image + audio | video (original audio) | video-generation/creation/modalities.json |
|
| 29 |
+
| `multimodal-text-img-to-vid` | Text + Image to Video | text + image | video (no audio) | video-generation/creation/modalities.json |
|
| 30 |
+
| `3d-to-vid` | 3D Model to Video | 3d-model | video (no audio) | video-generation/creation/modalities.json |
|
| 31 |
+
|
| 32 |
+
### Video Generation (Editing)
|
| 33 |
+
|
| 34 |
+
| ID | Name | Input | Output | File Location |
|
| 35 |
+
|----|------|-------|--------|---------------|
|
| 36 |
+
| *(To be populated)* | | | | video-generation/editing/modalities.json |
|
| 37 |
+
|
| 38 |
+
### Audio Generation (Creation)
|
| 39 |
+
|
| 40 |
+
| ID | Name | Input | Output | File Location |
|
| 41 |
+
|----|------|-------|--------|---------------|
|
| 42 |
+
| `text-to-audio` | Text to Audio | text | audio (general) | audio-generation/creation/modalities.json |
|
| 43 |
+
| `text-to-speech` | Text to Speech | text | audio (speech) | audio-generation/creation/modalities.json |
|
| 44 |
+
| `text-to-music` | Text to Music | text | audio (music) | audio-generation/creation/modalities.json |
|
| 45 |
+
|
| 46 |
+
### Audio Generation (Editing)
|
| 47 |
+
|
| 48 |
+
| ID | Name | Input | Output | File Location |
|
| 49 |
+
|----|------|-------|--------|---------------|
|
| 50 |
+
| *(To be populated)* | | | | audio-generation/editing/modalities.json |
|
| 51 |
+
|
| 52 |
+
### Image Generation (Creation)
|
| 53 |
+
|
| 54 |
+
| ID | Name | Input | Output | File Location |
|
| 55 |
+
|----|------|-------|--------|---------------|
|
| 56 |
+
| `text-to-img` | Text to Image | text | image | image-generation/creation/modalities.json |
|
| 57 |
+
|
| 58 |
+
### Image Generation (Editing)
|
| 59 |
+
|
| 60 |
+
| ID | Name | Input | Output | File Location |
|
| 61 |
+
|----|------|-------|--------|---------------|
|
| 62 |
+
| *(To be populated)* | | | | image-generation/editing/modalities.json |
|
| 63 |
+
|
| 64 |
+
### Text Generation (Creation)
|
| 65 |
+
|
| 66 |
+
| ID | Name | Input | Output | File Location |
|
| 67 |
+
|----|------|-------|--------|---------------|
|
| 68 |
+
| *(To be populated)* | | | | text-generation/creation/modalities.json |
|
| 69 |
+
|
| 70 |
+
### Text Generation (Editing)
|
| 71 |
+
|
| 72 |
+
| ID | Name | Input | Output | File Location |
|
| 73 |
+
|----|------|-------|--------|---------------|
|
| 74 |
+
| *(To be populated)* | | | | text-generation/editing/modalities.json |
|
| 75 |
+
|
| 76 |
+
### 3D Generation (Creation)
|
| 77 |
+
|
| 78 |
+
| ID | Name | Input | Output | File Location |
|
| 79 |
+
|----|------|-------|--------|---------------|
|
| 80 |
+
| *(To be populated)* | | | | 3d-generation/creation/modalities.json |
|
| 81 |
+
|
| 82 |
+
### 3D Generation (Editing)
|
| 83 |
+
|
| 84 |
+
| ID | Name | Input | Output | File Location |
|
| 85 |
+
|----|------|-------|--------|---------------|
|
| 86 |
+
| *(To be populated)* | | | | 3d-generation/editing/modalities.json |
|
| 87 |
+
|
| 88 |
+
## ID Assignment Guidelines
|
| 89 |
+
|
| 90 |
+
1. **Uniqueness**: Each modality must have a globally unique ID across all files
|
| 91 |
+
2. **Consistency**: Use kebab-case format
|
| 92 |
+
3. **Descriptive**: ID should indicate input, output, and key differentiator
|
| 93 |
+
4. **Stable**: IDs should not change once assigned (treat as permanent identifiers)
|
| 94 |
+
5. **Variant naming**: When adding variants, use descriptive suffixes rather than sequential numbers
|
| 95 |
+
|
| 96 |
+
## Next Available ID Prefixes
|
| 97 |
+
|
| 98 |
+
These prefixes are ready for new modalities:
|
| 99 |
+
|
| 100 |
+
### Video
|
| 101 |
+
- `vid-to-vid-*` (video editing/transformation)
|
| 102 |
+
- `music-to-vid-*` (music visualization)
|
| 103 |
+
- `multimodal-*-to-vid` (other multimodal combinations)
|
| 104 |
+
|
| 105 |
+
### Audio
|
| 106 |
+
- `audio-to-audio-*` (audio editing)
|
| 107 |
+
- `img-to-audio-*` (image sonification)
|
| 108 |
+
- `vid-to-audio-*` (video to audio extraction/generation)
|
| 109 |
+
- `music-to-*` (music transformations)
|
| 110 |
+
|
| 111 |
+
### Image
|
| 112 |
+
- `img-to-img-*` (image editing)
|
| 113 |
+
- `audio-to-img-*` (audio visualization)
|
| 114 |
+
- `vid-to-img-*` (video to image extraction)
|
| 115 |
+
- `multimodal-*-to-img` (multimodal image generation)
|
| 116 |
+
|
| 117 |
+
### Text
|
| 118 |
+
- `audio-to-text-*` (transcription, audio description)
|
| 119 |
+
- `img-to-text-*` (image captioning, OCR)
|
| 120 |
+
- `vid-to-text-*` (video captioning, transcription)
|
| 121 |
+
- `multimodal-*-to-text` (multimodal text generation)
|
| 122 |
+
|
| 123 |
+
### 3D
|
| 124 |
+
- `text-to-3d-*` (text to 3D model)
|
| 125 |
+
- `img-to-3d-*` (image to 3D reconstruction)
|
| 126 |
+
- `vid-to-3d-*` (video to 3D reconstruction)
|
| 127 |
+
- `3d-to-3d-*` (3D model editing)
|
| 128 |
+
|
| 129 |
+
## Change Log
|
| 130 |
+
|
| 131 |
+
- 2025-10-22: Initial master index created with existing modalities
|
| 132 |
+
- *Future changes will be logged here*
|
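The ID guidelines in the index above (global uniqueness, kebab-case, descriptive and stable) are straightforward to check mechanically. A minimal sketch, assuming the `taxonomy/<category>/<operation>/modalities.json` layout used in this commit:

```python
import json
import re
from pathlib import Path

# Minimal sketch of the ID guidelines above: globally unique, kebab-case ids.
# The directory layout is assumed to mirror taxonomy/<category>/<operation>/modalities.json.
ID_PATTERN = re.compile(r"^[a-z0-9]+(?:-[a-z0-9]+)*$")

seen = {}
for path in sorted(Path("taxonomy").glob("*/*/modalities.json")):
    data = json.loads(path.read_text(encoding="utf-8"))
    for entry in data.get("modalities", []):
        mid = entry["id"]
        if not ID_PATTERN.fullmatch(mid):
            print(f"{path}: '{mid}' is not kebab-case")
        if mid in seen:
            print(f"{path}: duplicate id '{mid}' (also defined in {seen[mid]})")
        seen[mid] = path

print(f"{len(seen)} unique modality ids")
```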
taxonomy/audio-generation/creation/modalities.json
CHANGED
|
@@ -84,6 +84,193 @@
|
|
| 84 |
"platforms": ["Replicate", "Stability AI"],
|
| 85 |
"exampleModels": ["MusicGen", "Stable Audio"]
|
| 86 |
}
|
|
|
|
|
|
|
|
|
| 87 |
}
|
| 88 |
]
|
| 89 |
}
|
|
|
|
| 84 |
"platforms": ["Replicate", "Stability AI"],
|
| 85 |
"exampleModels": ["MusicGen", "Stable Audio"]
|
| 86 |
}
|
| 87 |
+
},
|
| 88 |
+
{
|
| 89 |
+
"id": "text-to-speech-voice-clone",
|
| 90 |
+
"name": "Text to Speech (Voice Cloning)",
|
| 91 |
+
"input": {
|
| 92 |
+
"primary": "text",
|
| 93 |
+
"secondary": ["audio"]
|
| 94 |
+
},
|
| 95 |
+
"output": {
|
| 96 |
+
"primary": "audio",
|
| 97 |
+
"audioType": "speech"
|
| 98 |
+
},
|
| 99 |
+
"characteristics": {
|
| 100 |
+
"processType": "synthesis",
|
| 101 |
+
"audioType": "speech",
|
| 102 |
+
"voiceCloning": true
|
| 103 |
+
},
|
| 104 |
+
"metadata": {
|
| 105 |
+
"maturityLevel": "mature",
|
| 106 |
+
"commonUseCases": [
|
| 107 |
+
"Custom voice synthesis",
|
| 108 |
+
"Personalized narration",
|
| 109 |
+
"Voice preservation"
|
| 110 |
+
],
|
| 111 |
+
"platforms": ["ElevenLabs", "Replicate", "PlayHT"],
|
| 112 |
+
"exampleModels": ["ElevenLabs", "XTTS", "Bark"]
|
| 113 |
+
}
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"id": "text-to-sound-effects",
|
| 117 |
+
"name": "Text to Sound Effects",
|
| 118 |
+
"input": {
|
| 119 |
+
"primary": "text",
|
| 120 |
+
"secondary": []
|
| 121 |
+
},
|
| 122 |
+
"output": {
|
| 123 |
+
"primary": "audio",
|
| 124 |
+
"audioType": "sound-effects"
|
| 125 |
+
},
|
| 126 |
+
"characteristics": {
|
| 127 |
+
"processType": "synthesis",
|
| 128 |
+
"audioType": "sound-effects"
|
| 129 |
+
},
|
| 130 |
+
"metadata": {
|
| 131 |
+
"maturityLevel": "emerging",
|
| 132 |
+
"commonUseCases": [
|
| 133 |
+
"SFX generation",
|
| 134 |
+
"Foley creation",
|
| 135 |
+
"Game audio"
|
| 136 |
+
],
|
| 137 |
+
"platforms": ["Replicate", "AudioCraft"],
|
| 138 |
+
"exampleModels": ["AudioGen", "AudioLDM"]
|
| 139 |
+
}
|
| 140 |
+
},
|
| 141 |
+
{
|
| 142 |
+
"id": "img-to-audio",
|
| 143 |
+
"name": "Image to Audio",
|
| 144 |
+
"input": {
|
| 145 |
+
"primary": "image",
|
| 146 |
+
"secondary": ["text"]
|
| 147 |
+
},
|
| 148 |
+
"output": {
|
| 149 |
+
"primary": "audio",
|
| 150 |
+
"audioType": "general"
|
| 151 |
+
},
|
| 152 |
+
"characteristics": {
|
| 153 |
+
"processType": "synthesis",
|
| 154 |
+
"audioType": "general"
|
| 155 |
+
},
|
| 156 |
+
"metadata": {
|
| 157 |
+
"maturityLevel": "experimental",
|
| 158 |
+
"commonUseCases": [
|
| 159 |
+
"Image sonification",
|
| 160 |
+
"Scene audio generation",
|
| 161 |
+
"Accessibility"
|
| 162 |
+
],
|
| 163 |
+
"platforms": ["Experimental"],
|
| 164 |
+
"exampleModels": []
|
| 165 |
+
}
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"id": "music-to-music-style",
|
| 169 |
+
"name": "Music Style Transfer",
|
| 170 |
+
"input": {
|
| 171 |
+
"primary": "music",
|
| 172 |
+
"secondary": ["text"]
|
| 173 |
+
},
|
| 174 |
+
"output": {
|
| 175 |
+
"primary": "audio",
|
| 176 |
+
"audioType": "music"
|
| 177 |
+
},
|
| 178 |
+
"characteristics": {
|
| 179 |
+
"processType": "transformation",
|
| 180 |
+
"audioType": "music",
|
| 181 |
+
"melodic": true
|
| 182 |
+
},
|
| 183 |
+
"metadata": {
|
| 184 |
+
"maturityLevel": "experimental",
|
| 185 |
+
"commonUseCases": [
|
| 186 |
+
"Genre transformation",
|
| 187 |
+
"Instrument swap",
|
| 188 |
+
"Musical reimagining"
|
| 189 |
+
],
|
| 190 |
+
"platforms": ["Experimental"],
|
| 191 |
+
"exampleModels": []
|
| 192 |
+
}
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"id": "vid-to-audio-extraction",
|
| 196 |
+
"name": "Video to Audio Extraction",
|
| 197 |
+
"input": {
|
| 198 |
+
"primary": "video",
|
| 199 |
+
"secondary": []
|
| 200 |
+
},
|
| 201 |
+
"output": {
|
| 202 |
+
"primary": "audio",
|
| 203 |
+
"audioType": "general"
|
| 204 |
+
},
|
| 205 |
+
"characteristics": {
|
| 206 |
+
"processType": "transformation",
|
| 207 |
+
"audioType": "general"
|
| 208 |
+
},
|
| 209 |
+
"metadata": {
|
| 210 |
+
"maturityLevel": "mature",
|
| 211 |
+
"commonUseCases": [
|
| 212 |
+
"Audio extraction",
|
| 213 |
+
"Soundtrack isolation",
|
| 214 |
+
"Voice extraction"
|
| 215 |
+
],
|
| 216 |
+
"platforms": ["FFmpeg", "Standard tools"],
|
| 217 |
+
"exampleModels": []
|
| 218 |
+
}
|
| 219 |
+
},
|
| 220 |
+
{
|
| 221 |
+
"id": "humming-to-music",
|
| 222 |
+
"name": "Humming to Music",
|
| 223 |
+
"input": {
|
| 224 |
+
"primary": "audio",
|
| 225 |
+
"secondary": ["text"]
|
| 226 |
+
},
|
| 227 |
+
"output": {
|
| 228 |
+
"primary": "audio",
|
| 229 |
+
"audioType": "music"
|
| 230 |
+
},
|
| 231 |
+
"characteristics": {
|
| 232 |
+
"processType": "synthesis",
|
| 233 |
+
"audioType": "music",
|
| 234 |
+
"melodic": true
|
| 235 |
+
},
|
| 236 |
+
"metadata": {
|
| 237 |
+
"maturityLevel": "experimental",
|
| 238 |
+
"commonUseCases": [
|
| 239 |
+
"Melody to full track",
|
| 240 |
+
"Musical idea development",
|
| 241 |
+
"Composition assistance"
|
| 242 |
+
],
|
| 243 |
+
"platforms": ["Experimental"],
|
| 244 |
+
"exampleModels": []
|
| 245 |
+
}
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"id": "lyrics-to-music",
|
| 249 |
+
"name": "Lyrics to Music",
|
| 250 |
+
"input": {
|
| 251 |
+
"primary": "text",
|
| 252 |
+
"secondary": []
|
| 253 |
+
},
|
| 254 |
+
"output": {
|
| 255 |
+
"primary": "audio",
|
| 256 |
+
"audioType": "music"
|
| 257 |
+
},
|
| 258 |
+
"characteristics": {
|
| 259 |
+
"processType": "synthesis",
|
| 260 |
+
"audioType": "music",
|
| 261 |
+
"melodic": true,
|
| 262 |
+
"voiceCloning": false
|
| 263 |
+
},
|
| 264 |
+
"metadata": {
|
| 265 |
+
"maturityLevel": "emerging",
|
| 266 |
+
"commonUseCases": [
|
| 267 |
+
"Song generation",
|
| 268 |
+
"Music composition",
|
| 269 |
+
"Vocal track creation"
|
| 270 |
+
],
|
| 271 |
+
"platforms": ["Suno", "Udio", "Replicate"],
|
| 272 |
+
"exampleModels": ["Suno", "Udio", "MusicGen"]
|
| 273 |
+
}
|
| 274 |
}
|
| 275 |
]
|
| 276 |
}
|
taxonomy/audio-generation/editing/modalities.json
CHANGED
|
@@ -61,6 +61,193 @@
|
|
| 61 |
"parent": "audio-to-audio-inpainting",
|
| 62 |
"note": "Music inpainting is a specialized subset of audio inpainting"
|
| 63 |
}
|
|
|
|
|
|
|
|
|
|
|
| 64 |
}
|
| 65 |
]
|
| 66 |
}
|
|
|
|
| 61 |
"parent": "audio-to-audio-inpainting",
|
| 62 |
"note": "Music inpainting is a specialized subset of audio inpainting"
|
| 63 |
}
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"id": "audio-to-audio-enhancement",
|
| 67 |
+
"name": "Audio Enhancement",
|
| 68 |
+
"input": {
|
| 69 |
+
"primary": "audio",
|
| 70 |
+
"secondary": []
|
| 71 |
+
},
|
| 72 |
+
"output": {
|
| 73 |
+
"primary": "audio",
|
| 74 |
+
"audioType": "general"
|
| 75 |
+
},
|
| 76 |
+
"characteristics": {
|
| 77 |
+
"processType": "enhancement",
|
| 78 |
+
"modification": "enhancement"
|
| 79 |
+
},
|
| 80 |
+
"metadata": {
|
| 81 |
+
"maturityLevel": "mature",
|
| 82 |
+
"commonUseCases": [
|
| 83 |
+
"Noise reduction",
|
| 84 |
+
"Quality improvement",
|
| 85 |
+
"Audio cleanup"
|
| 86 |
+
],
|
| 87 |
+
"platforms": ["Adobe", "iZotope", "Replicate"],
|
| 88 |
+
"exampleModels": ["Adobe Podcast", "Krisp", "Denoiser"]
|
| 89 |
+
}
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"id": "audio-to-audio-restoration",
|
| 93 |
+
"name": "Audio Restoration",
|
| 94 |
+
"input": {
|
| 95 |
+
"primary": "audio",
|
| 96 |
+
"secondary": []
|
| 97 |
+
},
|
| 98 |
+
"output": {
|
| 99 |
+
"primary": "audio",
|
| 100 |
+
"audioType": "general"
|
| 101 |
+
},
|
| 102 |
+
"characteristics": {
|
| 103 |
+
"processType": "enhancement",
|
| 104 |
+
"modification": "restoration"
|
| 105 |
+
},
|
| 106 |
+
"metadata": {
|
| 107 |
+
"maturityLevel": "mature",
|
| 108 |
+
"commonUseCases": [
|
| 109 |
+
"Historical recording restoration",
|
| 110 |
+
"Audio artifact removal",
|
| 111 |
+
"Damaged audio repair"
|
| 112 |
+
],
|
| 113 |
+
"platforms": ["iZotope", "Adobe", "Accusonus"],
|
| 114 |
+
"exampleModels": ["iZotope RX", "Adobe Audition"]
|
| 115 |
+
}
|
| 116 |
+
},
|
| 117 |
+
{
|
| 118 |
+
"id": "audio-to-audio-voice-conversion",
|
| 119 |
+
"name": "Voice Conversion",
|
| 120 |
+
"input": {
|
| 121 |
+
"primary": "audio",
|
| 122 |
+
"secondary": ["audio"]
|
| 123 |
+
},
|
| 124 |
+
"output": {
|
| 125 |
+
"primary": "audio",
|
| 126 |
+
"audioType": "speech"
|
| 127 |
+
},
|
| 128 |
+
"characteristics": {
|
| 129 |
+
"processType": "transformation",
|
| 130 |
+
"audioType": "speech",
|
| 131 |
+
"voiceCloning": true,
|
| 132 |
+
"modification": "transformation"
|
| 133 |
+
},
|
| 134 |
+
"metadata": {
|
| 135 |
+
"maturityLevel": "emerging",
|
| 136 |
+
"commonUseCases": [
|
| 137 |
+
"Voice swapping",
|
| 138 |
+
"Dubbing",
|
| 139 |
+
"Voice translation"
|
| 140 |
+
],
|
| 141 |
+
"platforms": ["ElevenLabs", "Respeecher", "Replicate"],
|
| 142 |
+
"exampleModels": ["RVC", "So-VITS-SVC", "Respeecher"]
|
| 143 |
+
}
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"id": "music-to-music-stem-separation",
|
| 147 |
+
"name": "Music Stem Separation",
|
| 148 |
+
"input": {
|
| 149 |
+
"primary": "music",
|
| 150 |
+
"secondary": []
|
| 151 |
+
},
|
| 152 |
+
"output": {
|
| 153 |
+
"primary": "audio",
|
| 154 |
+
"audioType": "music"
|
| 155 |
+
},
|
| 156 |
+
"characteristics": {
|
| 157 |
+
"processType": "transformation",
|
| 158 |
+
"audioType": "music",
|
| 159 |
+
"modification": "selective-editing"
|
| 160 |
+
},
|
| 161 |
+
"metadata": {
|
| 162 |
+
"maturityLevel": "mature",
|
| 163 |
+
"commonUseCases": [
|
| 164 |
+
"Vocal isolation",
|
| 165 |
+
"Instrument extraction",
|
| 166 |
+
"Remixing",
|
| 167 |
+
"Karaoke creation"
|
| 168 |
+
],
|
| 169 |
+
"platforms": ["Spleeter", "Demucs", "Replicate"],
|
| 170 |
+
"exampleModels": ["Demucs", "Spleeter", "Ultimate Vocal Remover"]
|
| 171 |
+
}
|
| 172 |
+
},
|
| 173 |
+
{
|
| 174 |
+
"id": "audio-to-audio-speed-change",
|
| 175 |
+
"name": "Audio Speed/Pitch Modification",
|
| 176 |
+
"input": {
|
| 177 |
+
"primary": "audio",
|
| 178 |
+
"secondary": []
|
| 179 |
+
},
|
| 180 |
+
"output": {
|
| 181 |
+
"primary": "audio",
|
| 182 |
+
"audioType": "general"
|
| 183 |
+
},
|
| 184 |
+
"characteristics": {
|
| 185 |
+
"processType": "transformation",
|
| 186 |
+
"modification": "transformation"
|
| 187 |
+
},
|
| 188 |
+
"metadata": {
|
| 189 |
+
"maturityLevel": "mature",
|
| 190 |
+
"commonUseCases": [
|
| 191 |
+
"Tempo adjustment",
|
| 192 |
+
"Pitch shifting",
|
| 193 |
+
"Time stretching"
|
| 194 |
+
],
|
| 195 |
+
"platforms": ["Standard tools", "Adobe"],
|
| 196 |
+
"exampleModels": ["Rubber Band", "Paulstretch"]
|
| 197 |
+
}
|
| 198 |
+
},
|
| 199 |
+
{
|
| 200 |
+
"id": "music-to-music-mastering",
|
| 201 |
+
"name": "AI Music Mastering",
|
| 202 |
+
"input": {
|
| 203 |
+
"primary": "music",
|
| 204 |
+
"secondary": []
|
| 205 |
+
},
|
| 206 |
+
"output": {
|
| 207 |
+
"primary": "audio",
|
| 208 |
+
"audioType": "music"
|
| 209 |
+
},
|
| 210 |
+
"characteristics": {
|
| 211 |
+
"processType": "enhancement",
|
| 212 |
+
"audioType": "music",
|
| 213 |
+
"modification": "enhancement"
|
| 214 |
+
},
|
| 215 |
+
"metadata": {
|
| 216 |
+
"maturityLevel": "emerging",
|
| 217 |
+
"commonUseCases": [
|
| 218 |
+
"Automated mastering",
|
| 219 |
+
"Mix enhancement",
|
| 220 |
+
"Final polish"
|
| 221 |
+
],
|
| 222 |
+
"platforms": ["LANDR", "iZotope", "eMastered"],
|
| 223 |
+
"exampleModels": ["LANDR", "iZotope Ozone"]
|
| 224 |
+
}
|
| 225 |
+
},
|
| 226 |
+
{
|
| 227 |
+
"id": "audio-to-audio-spatial",
|
| 228 |
+
"name": "Spatial Audio Conversion",
|
| 229 |
+
"input": {
|
| 230 |
+
"primary": "audio",
|
| 231 |
+
"secondary": []
|
| 232 |
+
},
|
| 233 |
+
"output": {
|
| 234 |
+
"primary": "audio",
|
| 235 |
+
"audioType": "general"
|
| 236 |
+
},
|
| 237 |
+
"characteristics": {
|
| 238 |
+
"processType": "transformation",
|
| 239 |
+
"modification": "transformation"
|
| 240 |
+
},
|
| 241 |
+
"metadata": {
|
| 242 |
+
"maturityLevel": "emerging",
|
| 243 |
+
"commonUseCases": [
|
| 244 |
+
"Stereo to spatial",
|
| 245 |
+
"Binaural conversion",
|
| 246 |
+
"3D audio creation"
|
| 247 |
+
],
|
| 248 |
+
"platforms": ["Dolby", "Sony", "Experimental"],
|
| 249 |
+
"exampleModels": ["Dolby Atmos", "Sony 360 Reality Audio"]
|
| 250 |
+
}
|
| 251 |
}
|
| 252 |
]
|
| 253 |
}
|
taxonomy/image-generation/creation/modalities.json
CHANGED
|
@@ -29,6 +29,160 @@
|
|
| 29 |
"platforms": ["Replicate", "Stability AI", "Midjourney", "DALL-E"],
|
| 30 |
"exampleModels": ["Stable Diffusion", "DALL-E 3", "Midjourney"]
|
| 31 |
}
|
|
|
|
|
|
|
|
|
|
| 32 |
}
|
| 33 |
]
|
| 34 |
}
|
|
|
|
| 29 |
"platforms": ["Replicate", "Stability AI", "Midjourney", "DALL-E"],
|
| 30 |
"exampleModels": ["Stable Diffusion", "DALL-E 3", "Midjourney"]
|
| 31 |
}
|
| 32 |
+
},
|
| 33 |
+
{
|
| 34 |
+
"id": "text-img-to-img",
|
| 35 |
+
"name": "Text + Image to Image",
|
| 36 |
+
"input": {
|
| 37 |
+
"primary": "text",
|
| 38 |
+
"secondary": ["image"]
|
| 39 |
+
},
|
| 40 |
+
"output": {
|
| 41 |
+
"primary": "image"
|
| 42 |
+
},
|
| 43 |
+
"characteristics": {
|
| 44 |
+
"processType": "synthesis",
|
| 45 |
+
"generationType": "synthesis",
|
| 46 |
+
"guidanceType": "text-and-visual"
|
| 47 |
+
},
|
| 48 |
+
"metadata": {
|
| 49 |
+
"maturityLevel": "mature",
|
| 50 |
+
"commonUseCases": [
|
| 51 |
+
"Image-guided generation",
|
| 52 |
+
"Style reference",
|
| 53 |
+
"Composition guidance"
|
| 54 |
+
],
|
| 55 |
+
"platforms": ["Replicate", "Stability AI", "Midjourney"],
|
| 56 |
+
"exampleModels": ["Stable Diffusion with ControlNet", "DALL-E 3", "Midjourney"]
|
| 57 |
+
}
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"id": "img-to-img-upscale",
|
| 61 |
+
"name": "Image Upscaling",
|
| 62 |
+
"input": {
|
| 63 |
+
"primary": "image",
|
| 64 |
+
"secondary": []
|
| 65 |
+
},
|
| 66 |
+
"output": {
|
| 67 |
+
"primary": "image"
|
| 68 |
+
},
|
| 69 |
+
"characteristics": {
|
| 70 |
+
"processType": "enhancement",
|
| 71 |
+
"generationType": "synthesis"
|
| 72 |
+
},
|
| 73 |
+
"metadata": {
|
| 74 |
+
"maturityLevel": "mature",
|
| 75 |
+
"commonUseCases": [
|
| 76 |
+
"Resolution enhancement",
|
| 77 |
+
"Quality improvement",
|
| 78 |
+
"Detail enhancement"
|
| 79 |
+
],
|
| 80 |
+
"platforms": ["Topaz", "Replicate", "Stability AI"],
|
| 81 |
+
"exampleModels": ["Real-ESRGAN", "Topaz Gigapixel", "SUPIR"]
|
| 82 |
+
}
|
| 83 |
+
},
|
| 84 |
+
{
|
| 85 |
+
"id": "vid-to-img-frame-extraction",
|
| 86 |
+
"name": "Video to Image (Frame Extraction)",
|
| 87 |
+
"input": {
|
| 88 |
+
"primary": "video",
|
| 89 |
+
"secondary": []
|
| 90 |
+
},
|
| 91 |
+
"output": {
|
| 92 |
+
"primary": "image"
|
| 93 |
+
},
|
| 94 |
+
"characteristics": {
|
| 95 |
+
"processType": "transformation",
|
| 96 |
+
"generationType": "synthesis"
|
| 97 |
+
},
|
| 98 |
+
"metadata": {
|
| 99 |
+
"maturityLevel": "mature",
|
| 100 |
+
"commonUseCases": [
|
| 101 |
+
"Frame extraction",
|
| 102 |
+
"Thumbnail generation",
|
| 103 |
+
"Video analysis"
|
| 104 |
+
],
|
| 105 |
+
"platforms": ["FFmpeg", "Standard tools"],
|
| 106 |
+
"exampleModels": []
|
| 107 |
+
}
|
| 108 |
+
},
|
| 109 |
+
{
|
| 110 |
+
"id": "3d-to-img-render",
|
| 111 |
+
"name": "3D to Image (Rendering)",
|
| 112 |
+
"input": {
|
| 113 |
+
"primary": "3d-model",
|
| 114 |
+
"secondary": []
|
| 115 |
+
},
|
| 116 |
+
"output": {
|
| 117 |
+
"primary": "image"
|
| 118 |
+
},
|
| 119 |
+
"characteristics": {
|
| 120 |
+
"processType": "rendering",
|
| 121 |
+
"renderType": "3d-rendering",
|
| 122 |
+
"generationType": "synthesis"
|
| 123 |
+
},
|
| 124 |
+
"metadata": {
|
| 125 |
+
"maturityLevel": "mature",
|
| 126 |
+
"commonUseCases": [
|
| 127 |
+
"Product rendering",
|
| 128 |
+
"3D visualization",
|
| 129 |
+
"Architectural rendering"
|
| 130 |
+
],
|
| 131 |
+
"platforms": ["Blender", "Unreal Engine", "Unity"],
|
| 132 |
+
"exampleModels": []
|
| 133 |
+
}
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"id": "audio-to-img-visualization",
|
| 137 |
+
"name": "Audio to Image (Visualization)",
|
| 138 |
+
"input": {
|
| 139 |
+
"primary": "audio",
|
| 140 |
+
"secondary": ["text"]
|
| 141 |
+
},
|
| 142 |
+
"output": {
|
| 143 |
+
"primary": "image"
|
| 144 |
+
},
|
| 145 |
+
"characteristics": {
|
| 146 |
+
"processType": "synthesis",
|
| 147 |
+
"audioVisualization": true,
|
| 148 |
+
"generationType": "synthesis"
|
| 149 |
+
},
|
| 150 |
+
"metadata": {
|
| 151 |
+
"maturityLevel": "experimental",
|
| 152 |
+
"commonUseCases": [
|
| 153 |
+
"Album art generation",
|
| 154 |
+
"Sound visualization",
|
| 155 |
+
"Music imagery"
|
| 156 |
+
],
|
| 157 |
+
"platforms": ["Experimental"],
|
| 158 |
+
"exampleModels": []
|
| 159 |
+
}
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"id": "sketch-to-img",
|
| 163 |
+
"name": "Sketch to Image",
|
| 164 |
+
"input": {
|
| 165 |
+
"primary": "image",
|
| 166 |
+
"secondary": ["text"]
|
| 167 |
+
},
|
| 168 |
+
"output": {
|
| 169 |
+
"primary": "image"
|
| 170 |
+
},
|
| 171 |
+
"characteristics": {
|
| 172 |
+
"processType": "synthesis",
|
| 173 |
+
"generationType": "synthesis",
|
| 174 |
+
"guidanceType": "text-and-visual"
|
| 175 |
+
},
|
| 176 |
+
"metadata": {
|
| 177 |
+
"maturityLevel": "emerging",
|
| 178 |
+
"commonUseCases": [
|
| 179 |
+
"Sketch refinement",
|
| 180 |
+
"Concept development",
|
| 181 |
+
"Design exploration"
|
| 182 |
+
],
|
| 183 |
+
"platforms": ["Replicate", "Stability AI"],
|
| 184 |
+
"exampleModels": ["ControlNet Scribble", "Pix2Pix"]
|
| 185 |
+
}
|
| 186 |
}
|
| 187 |
]
|
| 188 |
}
|
taxonomy/image-generation/editing/modalities.json
CHANGED
|
@@ -30,6 +30,293 @@
|
|
| 30 |
"platforms": ["Replicate", "Stability AI", "Midjourney"],
|
| 31 |
"exampleModels": ["Stable Diffusion img2img", "ControlNet"]
|
| 32 |
}
|
|
|
|
|
|
|
|
|
|
| 33 |
}
|
| 34 |
]
|
| 35 |
}
|
|
|
|
| 30 |
"platforms": ["Replicate", "Stability AI", "Midjourney"],
|
| 31 |
"exampleModels": ["Stable Diffusion img2img", "ControlNet"]
|
| 32 |
}
|
| 33 |
+
},
|
| 34 |
+
{
|
| 35 |
+
"id": "img-to-img-inpainting",
|
| 36 |
+
"name": "Image Inpainting",
|
| 37 |
+
"input": {
|
| 38 |
+
"primary": "image",
|
| 39 |
+
"secondary": ["text"]
|
| 40 |
+
},
|
| 41 |
+
"output": {
|
| 42 |
+
"primary": "image"
|
| 43 |
+
},
|
| 44 |
+
"characteristics": {
|
| 45 |
+
"processType": "inpainting",
|
| 46 |
+
"transformationTypes": ["inpainting"],
|
| 47 |
+
"modification": "selective-editing"
|
| 48 |
+
},
|
| 49 |
+
"metadata": {
|
| 50 |
+
"maturityLevel": "mature",
|
| 51 |
+
"commonUseCases": [
|
| 52 |
+
"Object removal",
|
| 53 |
+
"Background extension",
|
| 54 |
+
"Image repair",
|
| 55 |
+
"Content-aware fill"
|
| 56 |
+
],
|
| 57 |
+
"platforms": ["Replicate", "Adobe", "Stability AI"],
|
| 58 |
+
"exampleModels": ["Stable Diffusion Inpainting", "LaMa", "Adobe Firefly"]
|
| 59 |
+
}
|
| 60 |
+
},
|
| 61 |
+
{
|
| 62 |
+
"id": "img-to-img-outpainting",
|
| 63 |
+
"name": "Image Outpainting",
|
| 64 |
+
"input": {
|
| 65 |
+
"primary": "image",
|
| 66 |
+
"secondary": ["text"]
|
| 67 |
+
},
|
| 68 |
+
"output": {
|
| 69 |
+
"primary": "image"
|
| 70 |
+
},
|
| 71 |
+
"characteristics": {
|
| 72 |
+
"processType": "synthesis",
|
| 73 |
+
"transformationTypes": ["editing"],
|
| 74 |
+
"modification": "selective-editing"
|
| 75 |
+
},
|
| 76 |
+
"metadata": {
|
| 77 |
+
"maturityLevel": "mature",
|
| 78 |
+
"commonUseCases": [
|
| 79 |
+
"Canvas extension",
|
| 80 |
+
"Image expansion",
|
| 81 |
+
"Background generation"
|
| 82 |
+
],
|
| 83 |
+
"platforms": ["Replicate", "DALL-E", "Stability AI"],
|
| 84 |
+
"exampleModels": ["Stable Diffusion Outpainting", "DALL-E Outpainting"]
|
| 85 |
+
}
|
| 86 |
+
},
|
| 87 |
+
{
|
| 88 |
+
"id": "img-to-img-style-transfer",
|
| 89 |
+
"name": "Image Style Transfer",
|
| 90 |
+
"input": {
|
| 91 |
+
"primary": "image",
|
| 92 |
+
"secondary": ["image", "text"]
|
| 93 |
+
},
|
| 94 |
+
"output": {
|
| 95 |
+
"primary": "image"
|
| 96 |
+
},
|
| 97 |
+
"characteristics": {
|
| 98 |
+
"processType": "transformation",
|
| 99 |
+
"transformationTypes": ["style-transfer"],
|
| 100 |
+
"modification": "transformation"
|
| 101 |
+
},
|
| 102 |
+
"metadata": {
|
| 103 |
+
"maturityLevel": "mature",
|
| 104 |
+
"commonUseCases": [
|
| 105 |
+
"Artistic style application",
|
| 106 |
+
"Photo stylization",
|
| 107 |
+
"Creative filters"
|
| 108 |
+
],
|
| 109 |
+
"platforms": ["Replicate", "Stability AI"],
|
| 110 |
+
"exampleModels": ["StyleGAN", "Neural Style Transfer", "InstantStyle"]
|
| 111 |
+
}
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"id": "img-to-img-colorization",
|
| 115 |
+
"name": "Image Colorization",
|
| 116 |
+
"input": {
|
| 117 |
+
"primary": "image",
|
| 118 |
+
"secondary": []
|
| 119 |
+
},
|
| 120 |
+
"output": {
|
| 121 |
+
"primary": "image"
|
| 122 |
+
},
|
| 123 |
+
"characteristics": {
|
| 124 |
+
"processType": "transformation",
|
| 125 |
+
"transformationTypes": ["enhancement"],
|
| 126 |
+
"modification": "enhancement"
|
| 127 |
+
},
|
| 128 |
+
"metadata": {
|
| 129 |
+
"maturityLevel": "mature",
|
| 130 |
+
"commonUseCases": [
|
| 131 |
+
"Black and white colorization",
|
| 132 |
+
"Historical photo restoration",
|
| 133 |
+
"Photo enhancement"
|
| 134 |
+
],
|
| 135 |
+
"platforms": ["Replicate", "DeOldify"],
|
| 136 |
+
"exampleModels": ["DeOldify", "Colorful Image Colorization"]
|
| 137 |
+
}
|
| 138 |
+
},
|
| 139 |
+
{
|
| 140 |
+
"id": "img-to-img-enhancement",
|
| 141 |
+
"name": "Image Enhancement",
|
| 142 |
+
"input": {
|
| 143 |
+
"primary": "image",
|
| 144 |
+
"secondary": []
|
| 145 |
+
},
|
| 146 |
+
"output": {
|
| 147 |
+
"primary": "image"
|
| 148 |
+
},
|
| 149 |
+
"characteristics": {
|
| 150 |
+
"processType": "enhancement",
|
| 151 |
+
"transformationTypes": ["enhancement"],
|
| 152 |
+
"modification": "enhancement"
|
| 153 |
+
},
|
| 154 |
+
"metadata": {
|
| 155 |
+
"maturityLevel": "mature",
|
| 156 |
+
"commonUseCases": [
|
| 157 |
+
"Quality improvement",
|
| 158 |
+
"Noise reduction",
|
| 159 |
+
"Sharpening",
|
| 160 |
+
"Dynamic range enhancement"
|
| 161 |
+
],
|
| 162 |
+
"platforms": ["Topaz", "Adobe", "Replicate"],
|
| 163 |
+
"exampleModels": ["Topaz Photo AI", "Adobe Enhance"]
|
| 164 |
+
}
|
| 165 |
+
},
|
| 166 |
+
{
|
| 167 |
+
"id": "img-to-img-restoration",
|
| 168 |
+
"name": "Image Restoration",
|
| 169 |
+
"input": {
|
| 170 |
+
"primary": "image",
|
| 171 |
+
"secondary": []
|
| 172 |
+
},
|
| 173 |
+
"output": {
|
| 174 |
+
"primary": "image"
|
| 175 |
+
},
|
| 176 |
+
"characteristics": {
|
| 177 |
+
"processType": "enhancement",
|
| 178 |
+
"transformationTypes": ["enhancement"],
|
| 179 |
+
"modification": "restoration"
|
| 180 |
+
},
|
| 181 |
+
"metadata": {
|
| 182 |
+
"maturityLevel": "mature",
|
| 183 |
+
"commonUseCases": [
|
| 184 |
+
"Old photo restoration",
|
| 185 |
+
"Damaged image repair",
|
| 186 |
+
"Artifact removal"
|
| 187 |
+
],
|
| 188 |
+
"platforms": ["Replicate", "Remini"],
|
| 189 |
+
"exampleModels": ["GFPGAN", "CodeFormer", "Remini"]
|
| 190 |
+
}
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"id": "img-to-img-background-removal",
|
| 194 |
+
"name": "Background Removal",
|
| 195 |
+
"input": {
|
| 196 |
+
"primary": "image",
|
| 197 |
+
"secondary": []
|
| 198 |
+
},
|
| 199 |
+
"output": {
|
| 200 |
+
"primary": "image"
|
| 201 |
+
},
|
| 202 |
+
"characteristics": {
|
| 203 |
+
"processType": "transformation",
|
| 204 |
+
"transformationTypes": ["object-editing"],
|
| 205 |
+
"modification": "selective-editing"
|
| 206 |
+
},
|
| 207 |
+
"metadata": {
|
| 208 |
+
"maturityLevel": "mature",
|
| 209 |
+
"commonUseCases": [
|
| 210 |
+
"Background removal",
|
| 211 |
+
"Subject isolation",
|
| 212 |
+
"Product photography"
|
| 213 |
+
],
|
| 214 |
+
"platforms": ["Remove.bg", "Adobe", "Replicate"],
|
| 215 |
+
"exampleModels": ["U2-Net", "RMBG", "SAM"]
|
| 216 |
+
}
|
| 217 |
+
},
|
| 218 |
+
{
|
| 219 |
+
"id": "img-to-img-relighting",
|
| 220 |
+
"name": "Image Relighting",
|
| 221 |
+
"input": {
|
| 222 |
+
"primary": "image",
|
| 223 |
+
"secondary": ["text"]
|
| 224 |
+
},
|
| 225 |
+
"output": {
|
| 226 |
+
"primary": "image"
|
| 227 |
+
},
|
| 228 |
+
"characteristics": {
|
| 229 |
+
"processType": "transformation",
|
| 230 |
+
"transformationTypes": ["enhancement"],
|
| 231 |
+
"modification": "enhancement"
|
| 232 |
+
},
|
| 233 |
+
"metadata": {
|
| 234 |
+
"maturityLevel": "emerging",
|
| 235 |
+
"commonUseCases": [
|
| 236 |
+
"Lighting adjustment",
|
| 237 |
+
"Portrait relighting",
|
| 238 |
+
"Scene mood change"
|
| 239 |
+
],
|
| 240 |
+
"platforms": ["Experimental", "Adobe"],
|
| 241 |
+
"exampleModels": ["IC-Light"]
|
| 242 |
+
}
|
| 243 |
+
},
|
| 244 |
+
{
|
| 245 |
+
"id": "img-to-img-face-swap",
|
| 246 |
+
"name": "Face Swap",
|
| 247 |
+
"input": {
|
| 248 |
+
"primary": "image",
|
| 249 |
+
"secondary": ["image"]
|
| 250 |
+
},
|
| 251 |
+
"output": {
|
| 252 |
+
"primary": "image"
|
| 253 |
+
},
|
| 254 |
+
"characteristics": {
|
| 255 |
+
"processType": "transformation",
|
| 256 |
+
"transformationTypes": ["object-editing"],
|
| 257 |
+
"modification": "selective-editing"
|
| 258 |
+
},
|
| 259 |
+
"metadata": {
|
| 260 |
+
"maturityLevel": "mature",
|
| 261 |
+
"commonUseCases": [
|
| 262 |
+
"Face replacement",
|
| 263 |
+
"Identity swap",
|
| 264 |
+
"Portrait editing"
|
| 265 |
+
],
|
| 266 |
+
"platforms": ["Replicate", "FaceSwap"],
|
| 267 |
+
"exampleModels": ["InsightFace", "SimSwap", "Roop"]
|
| 268 |
+
}
|
| 269 |
+
},
|
| 270 |
+
{
|
| 271 |
+
"id": "img-to-img-depth-map",
|
| 272 |
+
"name": "Depth Map Generation",
|
| 273 |
+
"input": {
|
| 274 |
+
"primary": "image",
|
| 275 |
+
"secondary": []
|
| 276 |
+
},
|
| 277 |
+
"output": {
|
| 278 |
+
"primary": "image"
|
| 279 |
+
},
|
| 280 |
+
"characteristics": {
|
| 281 |
+
"processType": "transformation",
|
| 282 |
+
"transformationTypes": ["editing"]
|
| 283 |
+
},
|
| 284 |
+
"metadata": {
|
| 285 |
+
"maturityLevel": "mature",
|
| 286 |
+
"commonUseCases": [
|
| 287 |
+
"Depth estimation",
|
| 288 |
+
"3D reconstruction prep",
|
| 289 |
+
"Spatial understanding"
|
| 290 |
+
],
|
| 291 |
+
"platforms": ["Replicate", "HuggingFace"],
|
| 292 |
+
"exampleModels": ["Depth-Anything", "MiDaS", "ZoeDepth"]
|
| 293 |
+
}
|
| 294 |
+
},
|
| 295 |
+
{
|
| 296 |
+
"id": "img-to-img-segmentation",
|
| 297 |
+
"name": "Image Segmentation",
|
| 298 |
+
"input": {
|
| 299 |
+
"primary": "image",
|
| 300 |
+
"secondary": ["text"]
|
| 301 |
+
},
|
| 302 |
+
"output": {
|
| 303 |
+
"primary": "image"
|
| 304 |
+
},
|
| 305 |
+
"characteristics": {
|
| 306 |
+
"processType": "transformation",
|
| 307 |
+
"transformationTypes": ["object-editing"],
|
| 308 |
+
"modification": "selective-editing"
|
| 309 |
+
},
|
| 310 |
+
"metadata": {
|
| 311 |
+
"maturityLevel": "mature",
|
| 312 |
+
"commonUseCases": [
|
| 313 |
+
"Object isolation",
|
| 314 |
+
"Semantic segmentation",
|
| 315 |
+
"Masking"
|
| 316 |
+
],
|
| 317 |
+
"platforms": ["Replicate", "Meta"],
|
| 318 |
+
"exampleModels": ["Segment Anything (SAM)", "Semantic Segment Anything"]
|
| 319 |
+
}
|
| 320 |
}
|
| 321 |
]
|
| 322 |
}
|
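The nested modalities.json entries and the flat JSONL rows earlier in this commit describe the same records in two shapes. A minimal sketch of flattening one nested entry into the row format; the field names are taken from the files in this commit, while the `output_audio` handling is an assumption rather than a rule documented here:

```python
import json

def flatten(entry, output_modality, operation_type):
    """Map one nested modalities.json entry onto the flat JSONL row layout."""
    output = entry.get("output", {})
    metadata = entry.get("metadata", {})
    return {
        "id": entry["id"],
        "name": entry["name"],
        "input_primary": entry["input"]["primary"],
        "input_secondary": entry["input"].get("secondary", []),
        "output_primary": output["primary"],
        # Assumption: a row counts as audio-bearing when the nested entry
        # declares an output audioType; the dataset may encode this differently.
        "output_audio": "audioType" in output,
        "output_audio_type": output.get("audioType", ""),
        # Nested objects are serialized as JSON strings in the flat rows.
        "characteristics": json.dumps(entry.get("characteristics", {})),
        "metadata_maturity_level": metadata.get("maturityLevel", ""),
        "metadata_common_use_cases": metadata.get("commonUseCases", []),
        "metadata_platforms": metadata.get("platforms", []),
        "metadata_example_models": metadata.get("exampleModels", []),
        "relationships": json.dumps(entry.get("relationships", {})),
        "output_modality": output_modality,
        "operation_type": operation_type,
    }
```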
taxonomy/text-generation/creation/modalities.json
CHANGED
|
@@ -2,6 +2,230 @@
|
|
| 2 |
"fileType": "multimodal-ai-taxonomy",
|
| 3 |
"outputModality": "text",
|
| 4 |
"operationType": "creation",
|
| 5 |
-
"description": "Modalities for creating text content from various inputs
|
| 6 |
-
"modalities": [
|
|
|
|
|
|
|
|
|
|
| 7 |
}
|
|
|
|
| 2 |
"fileType": "multimodal-ai-taxonomy",
|
| 3 |
"outputModality": "text",
|
| 4 |
"operationType": "creation",
|
| 5 |
+
"description": "Modalities for creating text content from various multimodal inputs",
|
| 6 |
+
"modalities": [
|
| 7 |
+
{
|
| 8 |
+
"id": "audio-to-text-transcription",
|
| 9 |
+
"name": "Audio to Text (Transcription)",
|
| 10 |
+
"input": {
|
| 11 |
+
"primary": "audio",
|
| 12 |
+
"secondary": []
|
| 13 |
+
},
|
| 14 |
+
"output": {
|
| 15 |
+
"primary": "text"
|
| 16 |
+
},
|
| 17 |
+
"characteristics": {
|
| 18 |
+
"processType": "transformation"
|
| 19 |
+
},
|
| 20 |
+
"metadata": {
|
| 21 |
+
"maturityLevel": "mature",
|
| 22 |
+
"commonUseCases": [
|
| 23 |
+
"Speech transcription",
|
| 24 |
+
"Meeting notes",
|
| 25 |
+
"Subtitling",
|
| 26 |
+
"Accessibility"
|
| 27 |
+
],
|
| 28 |
+
"platforms": ["OpenAI", "AssemblyAI", "Deepgram", "Google Cloud"],
|
| 29 |
+
"exampleModels": ["Whisper", "AssemblyAI", "Deepgram Nova"]
|
| 30 |
+
}
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"id": "img-to-text-captioning",
|
| 34 |
+
"name": "Image to Text (Captioning)",
|
| 35 |
+
"input": {
|
| 36 |
+
"primary": "image",
|
| 37 |
+
"secondary": []
|
| 38 |
+
},
|
| 39 |
+
"output": {
|
| 40 |
+
"primary": "text"
|
| 41 |
+
},
|
| 42 |
+
"characteristics": {
|
| 43 |
+
"processType": "synthesis"
|
| 44 |
+
},
|
| 45 |
+
"metadata": {
|
| 46 |
+
"maturityLevel": "mature",
|
| 47 |
+
"commonUseCases": [
|
| 48 |
+
"Image description",
|
| 49 |
+
"Alt text generation",
|
| 50 |
+
"Scene understanding",
|
| 51 |
+
"Accessibility"
|
| 52 |
+
],
|
| 53 |
+
"platforms": ["OpenAI", "Google Cloud", "HuggingFace"],
|
| 54 |
+
"exampleModels": ["GPT-4 Vision", "BLIP", "LLaVA", "Gemini Vision"]
|
| 55 |
+
}
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"id": "img-to-text-ocr",
|
| 59 |
+
"name": "Image to Text (OCR)",
|
| 60 |
+
"input": {
|
| 61 |
+
"primary": "image",
|
| 62 |
+
"secondary": []
|
| 63 |
+
},
|
| 64 |
+
"output": {
|
| 65 |
+
"primary": "text"
|
| 66 |
+
},
|
| 67 |
+
"characteristics": {
|
| 68 |
+
"processType": "transformation"
|
| 69 |
+
},
|
| 70 |
+
"metadata": {
|
| 71 |
+
"maturityLevel": "mature",
|
| 72 |
+
"commonUseCases": [
|
| 73 |
+
"Text extraction",
|
| 74 |
+
"Document digitization",
|
| 75 |
+
"Receipt scanning",
|
| 76 |
+
"Data entry automation"
|
| 77 |
+
],
|
| 78 |
+
"platforms": ["Google Cloud", "AWS", "Azure", "Tesseract"],
|
| 79 |
+
"exampleModels": ["Google Cloud Vision", "AWS Textract", "Tesseract", "EasyOCR"]
|
| 80 |
+
}
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"id": "vid-to-text-transcription",
|
| 84 |
+
"name": "Video to Text (Transcription)",
|
| 85 |
+
"input": {
|
| 86 |
+
"primary": "video",
|
| 87 |
+
"secondary": []
|
| 88 |
+
},
|
| 89 |
+
"output": {
|
| 90 |
+
"primary": "text"
|
| 91 |
+
},
|
| 92 |
+
"characteristics": {
|
| 93 |
+
"processType": "transformation"
|
| 94 |
+
},
|
| 95 |
+
"metadata": {
|
| 96 |
+
"maturityLevel": "mature",
|
| 97 |
+
"commonUseCases": [
|
| 98 |
+
"Video subtitling",
|
| 99 |
+
"Content indexing",
|
| 100 |
+
"Meeting transcription",
|
| 101 |
+
"Accessibility"
|
| 102 |
+
],
|
| 103 |
+
"platforms": ["OpenAI", "AssemblyAI", "YouTube", "Rev"],
|
| 104 |
+
"exampleModels": ["Whisper", "AssemblyAI", "Google Speech-to-Text"]
|
| 105 |
+
}
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"id": "vid-to-text-captioning",
|
| 109 |
+
"name": "Video to Text (Captioning/Description)",
|
| 110 |
+
"input": {
|
| 111 |
+
"primary": "video",
|
| 112 |
+
"secondary": []
|
| 113 |
+
},
|
| 114 |
+
"output": {
|
| 115 |
+
"primary": "text"
|
| 116 |
+
},
|
| 117 |
+
"characteristics": {
|
| 118 |
+
"processType": "synthesis"
|
| 119 |
+
},
|
| 120 |
+
"metadata": {
|
| 121 |
+
"maturityLevel": "emerging",
|
| 122 |
+
"commonUseCases": [
|
| 123 |
+
"Video description",
|
| 124 |
+
"Content summarization",
|
| 125 |
+
"Scene understanding",
|
| 126 |
+
"Accessibility"
|
| 127 |
+
],
|
| 128 |
+
"platforms": ["OpenAI", "Google", "Experimental"],
|
| 129 |
+
"exampleModels": ["GPT-4 Vision", "Gemini Video", "Video-LLaMA"]
|
| 130 |
+
}
|
| 131 |
+
},
|
| 132 |
+
{
|
| 133 |
+
"id": "multimodal-to-text-vqa",
|
| 134 |
+
"name": "Visual Question Answering",
|
| 135 |
+
"input": {
|
| 136 |
+
"primary": "image",
|
| 137 |
+
"secondary": ["text"]
|
| 138 |
+
},
|
| 139 |
+
"output": {
|
| 140 |
+
"primary": "text"
|
| 141 |
+
},
|
| 142 |
+
"characteristics": {
|
| 143 |
+
"processType": "synthesis",
|
| 144 |
+
"guidanceType": "multimodal"
|
| 145 |
+
},
|
| 146 |
+
"metadata": {
|
| 147 |
+
"maturityLevel": "mature",
|
| 148 |
+
"commonUseCases": [
|
| 149 |
+
"Image Q&A",
|
| 150 |
+
"Visual information retrieval",
|
| 151 |
+
"Educational applications",
|
| 152 |
+
"Accessibility"
|
| 153 |
+
],
|
| 154 |
+
"platforms": ["OpenAI", "Anthropic", "Google"],
|
| 155 |
+
"exampleModels": ["GPT-4 Vision", "Claude", "Gemini Vision"]
|
| 156 |
+
}
|
| 157 |
+
},
|
| 158 |
+
{
|
| 159 |
+
"id": "3d-to-text-description",
|
| 160 |
+
"name": "3D Model to Text (Description)",
|
| 161 |
+
"input": {
|
| 162 |
+
"primary": "3d-model",
|
| 163 |
+
"secondary": []
|
| 164 |
+
},
|
| 165 |
+
"output": {
|
| 166 |
+
"primary": "text"
|
| 167 |
+
},
|
| 168 |
+
"characteristics": {
|
| 169 |
+
"processType": "synthesis"
|
| 170 |
+
},
|
| 171 |
+
"metadata": {
|
| 172 |
+
"maturityLevel": "experimental",
|
| 173 |
+
"commonUseCases": [
|
| 174 |
+
"3D model description",
|
| 175 |
+
"Asset cataloging",
|
| 176 |
+
"Model understanding"
|
| 177 |
+
],
|
| 178 |
+
"platforms": ["Experimental"],
|
| 179 |
+
"exampleModels": []
|
| 180 |
+
}
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"id": "music-to-text-transcription",
|
| 184 |
+
"name": "Music to Text (Transcription)",
|
| 185 |
+
"input": {
|
| 186 |
+
"primary": "music",
|
| 187 |
+
"secondary": []
|
| 188 |
+
},
|
| 189 |
+
"output": {
|
| 190 |
+
"primary": "text"
|
| 191 |
+
},
|
| 192 |
+
"characteristics": {
|
| 193 |
+
"processType": "transformation"
|
| 194 |
+
},
|
| 195 |
+
"metadata": {
|
| 196 |
+
"maturityLevel": "emerging",
|
| 197 |
+
"commonUseCases": [
|
| 198 |
+
"Music notation",
|
| 199 |
+
"Sheet music generation",
|
| 200 |
+
"MIDI to score"
|
| 201 |
+
],
|
| 202 |
+
"platforms": ["Experimental", "AnthemScore"],
|
| 203 |
+
"exampleModels": ["AnthemScore", "Audio to MIDI"]
|
| 204 |
+
}
|
| 205 |
+
},
|
| 206 |
+
{
|
| 207 |
+
"id": "audio-to-text-diarization",
|
| 208 |
+
"name": "Audio to Text (Speaker Diarization)",
|
| 209 |
+
"input": {
|
| 210 |
+
"primary": "audio",
|
| 211 |
+
"secondary": []
|
| 212 |
+
},
|
| 213 |
+
"output": {
|
| 214 |
+
"primary": "text"
|
| 215 |
+
},
|
| 216 |
+
"characteristics": {
|
| 217 |
+
"processType": "transformation"
|
| 218 |
+
},
|
| 219 |
+
"metadata": {
|
| 220 |
+
"maturityLevel": "mature",
|
| 221 |
+
"commonUseCases": [
|
| 222 |
+
"Multi-speaker transcription",
|
| 223 |
+
"Meeting notes with speakers",
|
| 224 |
+
"Interview transcription"
|
| 225 |
+
],
|
| 226 |
+
"platforms": ["AssemblyAI", "Deepgram", "Pyannote"],
|
| 227 |
+
"exampleModels": ["Pyannote", "AssemblyAI", "Whisper + Diarization"]
|
| 228 |
+
}
|
| 229 |
+
}
|
| 230 |
+
]
|
| 231 |
}
|
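The text-creation entries above can be queried like any of the other modality files. A small sketch of one possible lookup, filtering by primary input and maturity level (function and parameter names are illustrative):

import json

def find_modalities(path, input_primary=None, maturity=None):
    # Return ids of entries matching optional primary-input and maturity filters.
    with open(path, encoding="utf-8") as f:
        doc = json.load(f)
    ids = []
    for entry in doc["modalities"]:
        if input_primary and entry["input"]["primary"] != input_primary:
            continue
        if maturity and entry["metadata"]["maturityLevel"] != maturity:
            continue
        ids.append(entry["id"])
    return ids

# e.g. find_modalities("taxonomy/text-generation/creation/modalities.json",
#                      input_primary="audio", maturity="mature")
# -> ["audio-to-text-transcription", "audio-to-text-diarization"]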
taxonomy/text-generation/editing/modalities.json
CHANGED
@@ -2,6 +2,107 @@
  "fileType": "multimodal-ai-taxonomy",
  "outputModality": "text",
  "operationType": "editing",
-  "description": "Modalities for editing and transforming existing text content
-  "modalities": [
+  "description": "Modalities for editing and transforming existing text content",
+  "modalities": [
+    {
+      "id": "text-to-text-translation",
+      "name": "Text Translation",
+      "input": { "primary": "text", "secondary": [] },
+      "output": { "primary": "text" },
+      "characteristics": { "processType": "transformation", "modification": "transformation" },
+      "metadata": {
+        "maturityLevel": "mature",
+        "commonUseCases": ["Language translation", "Localization", "Multilingual content"],
+        "platforms": ["Google Translate", "DeepL", "OpenAI"],
+        "exampleModels": ["Google Translate", "DeepL", "GPT-4", "NLLB"]
+      }
+    },
+    {
+      "id": "text-to-text-summarization",
+      "name": "Text Summarization",
+      "input": { "primary": "text", "secondary": [] },
+      "output": { "primary": "text" },
+      "characteristics": { "processType": "transformation", "modification": "transformation" },
+      "metadata": {
+        "maturityLevel": "mature",
+        "commonUseCases": ["Document summarization", "Content condensation", "Abstract generation"],
+        "platforms": ["OpenAI", "Anthropic", "HuggingFace"],
+        "exampleModels": ["GPT-4", "Claude", "BART"]
+      }
+    },
+    {
+      "id": "text-to-text-paraphrasing",
+      "name": "Text Paraphrasing/Rewriting",
+      "input": { "primary": "text", "secondary": [] },
+      "output": { "primary": "text" },
+      "characteristics": { "processType": "transformation", "modification": "transformation" },
+      "metadata": {
+        "maturityLevel": "mature",
+        "commonUseCases": ["Content rewriting", "Style adjustment", "Tone modification"],
+        "platforms": ["OpenAI", "Anthropic", "QuillBot"],
+        "exampleModels": ["GPT-4", "Claude", "QuillBot"]
+      }
+    },
+    {
+      "id": "text-to-text-grammar-correction",
+      "name": "Grammar & Spelling Correction",
+      "input": { "primary": "text", "secondary": [] },
+      "output": { "primary": "text" },
+      "characteristics": { "processType": "enhancement", "modification": "enhancement" },
+      "metadata": {
+        "maturityLevel": "mature",
+        "commonUseCases": ["Proofreading", "Error correction", "Writing improvement"],
+        "platforms": ["Grammarly", "LanguageTool", "OpenAI"],
+        "exampleModels": ["Grammarly", "LanguageTool", "GPT-4"]
+      }
+    }
+  ]
}
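Because each file carries its outputModality and operationType at the top level, the nested files can be flattened into one record per modality when a tabular view is more convenient. One possible flattening is sketched below; the flat field names are illustrative, not an official schema:

import json

def flatten(path):
    # Yield one flat dict per modality, carrying the file-level context with it.
    with open(path, encoding="utf-8") as f:
        doc = json.load(f)
    for entry in doc["modalities"]:
        yield {
            "id": entry["id"],
            "name": entry["name"],
            "input_primary": entry["input"]["primary"],
            "output_primary": entry["output"]["primary"],
            "operation_type": doc["operationType"],
            "maturity": entry["metadata"]["maturityLevel"],
        }

# with open("text_editing_rows.jsonl", "w", encoding="utf-8") as out:
#     for row in flatten("taxonomy/text-generation/editing/modalities.json"):
#         out.write(json.dumps(row) + "\n")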
taxonomy/video-generation/creation/modalities.json
CHANGED
@@ -323,6 +323,153 @@
        "platforms": ["Blender", "Unreal Engine", "Unity"],
        "exampleModels": []
      }
+    },
+    {
+      "id": "music-to-vid",
+      "name": "Music to Video",
+      "input": { "primary": "music", "secondary": ["text"] },
+      "output": { "primary": "video", "audio": true, "audioType": "music" },
+      "characteristics": { "processType": "synthesis", "audioVisualization": true, "motionType": "audio-reactive", "audioVideoSync": true },
+      "metadata": {
+        "maturityLevel": "emerging",
+        "commonUseCases": ["Music video generation", "Lyric videos", "Album visualizers"],
+        "platforms": ["Replicate", "Experimental"],
+        "exampleModels": ["Stable Diffusion Video"]
+      }
+    },
+    {
+      "id": "text-to-vid-music",
+      "name": "Text to Video with Music",
+      "input": { "primary": "text", "secondary": [] },
+      "output": { "primary": "video", "audio": true, "audioType": "music" },
+      "characteristics": { "processType": "synthesis", "audioGeneration": "synthesized", "audioPrompting": "text-based", "audioVideoSync": true, "motionType": "general" },
+      "metadata": {
+        "maturityLevel": "experimental",
+        "commonUseCases": ["Promotional videos", "Social media content", "Advertisement creation"],
+        "platforms": ["Experimental"],
+        "exampleModels": []
+      }
+    },
+    {
+      "id": "img-to-vid-music",
+      "name": "Image to Video with Music",
+      "input": { "primary": "image", "secondary": ["text"] },
+      "output": { "primary": "video", "audio": true, "audioType": "music" },
+      "characteristics": { "processType": "synthesis", "audioGeneration": "synthesized", "audioPrompting": "text-based", "motionType": "general", "audioCharacteristics": ["melodic", "rhythmic"] },
+      "metadata": {
+        "maturityLevel": "experimental",
+        "commonUseCases": ["Photo slideshow creation", "Social media posts", "Memory videos"],
+        "platforms": ["Experimental"],
+        "exampleModels": []
+      }
+    },
+    {
+      "id": "vid-to-vid-upscale",
+      "name": "Video Upscaling",
+      "input": { "primary": "video", "secondary": [] },
+      "output": { "primary": "video", "audio": true, "audioType": "original" },
+      "characteristics": { "processType": "enhancement", "audioHandling": "passthrough", "preserveAudio": true },
+      "metadata": {
+        "maturityLevel": "mature",
+        "commonUseCases": ["Resolution enhancement", "Quality improvement", "Restoration"],
+        "platforms": ["Topaz", "Replicate"],
+        "exampleModels": ["Topaz Video AI", "Real-ESRGAN Video"]
+      }
+    },
+    {
+      "id": "multimodal-vid-text-to-vid",
+      "name": "Video + Text to Video",
+      "input": { "primary": "video", "secondary": ["text"] },
+      "output": { "primary": "video", "audio": true, "audioType": "original" },
+      "characteristics": { "processType": "transformation", "guidanceType": "text-and-visual", "audioHandling": "passthrough", "preserveAudio": true, "motionType": "guided" },
+      "metadata": {
+        "maturityLevel": "emerging",
+        "commonUseCases": ["Video-guided generation", "Motion transfer", "Style-guided video"],
+        "platforms": ["Replicate", "RunwayML"],
+        "exampleModels": ["Gen-2", "Pika"]
+      }
    }
  ]
}
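Since the creation entries above mix several input/output pairings (music to video, text to video, image to video, video to video), an index keyed on that pairing is a convenient way to navigate them. A minimal sketch; names are illustrative:

import json
from collections import defaultdict

def index_by_io(path):
    # Group modality ids by their (primary input, primary output) pairing.
    with open(path, encoding="utf-8") as f:
        doc = json.load(f)
    index = defaultdict(list)
    for entry in doc["modalities"]:
        key = (entry["input"]["primary"], entry["output"]["primary"])
        index[key].append(entry["id"])
    return dict(index)

# e.g. index_by_io("taxonomy/video-generation/creation/modalities.json") maps
# ("music", "video") to music-driven entries such as "music-to-vid".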
taxonomy/video-generation/editing/modalities.json
CHANGED
@@ -58,6 +58,217 @@
        "platforms": ["Replicate", "RunwayML"],
        "exampleModels": []
      }
+    },
+    {
+      "id": "vid-to-vid-inpainting",
+      "name": "Video Inpainting",
+      "input": { "primary": "video", "secondary": ["text"] },
+      "output": { "primary": "video", "audio": true, "audioType": "original" },
+      "characteristics": { "processType": "inpainting", "transformationTypes": ["object-editing"], "preserveAudio": true, "audioHandling": "passthrough", "modification": "selective-editing" },
+      "metadata": {
+        "maturityLevel": "emerging",
+        "commonUseCases": ["Object removal", "Background replacement", "Video cleanup"],
+        "platforms": ["Replicate", "RunwayML", "Adobe"],
+        "exampleModels": ["ProPainter", "E2FGVI"]
+      }
+    },
+    {
+      "id": "vid-to-vid-enhancement",
+      "name": "Video Enhancement",
+      "input": { "primary": "video", "secondary": [] },
+      "output": { "primary": "video", "audio": true, "audioType": "original" },
+      "characteristics": { "processType": "enhancement", "transformationTypes": ["enhancement"], "preserveAudio": true, "audioHandling": "passthrough", "modification": "enhancement" },
+      "metadata": {
+        "maturityLevel": "mature",
+        "commonUseCases": ["Quality improvement", "Noise reduction", "Color grading", "Stabilization"],
+        "platforms": ["Topaz", "Adobe", "Replicate"],
+        "exampleModels": ["Topaz Video AI", "DAIN", "Real-ESRGAN"]
+      }
+    },
+    {
+      "id": "vid-to-vid-interpolation",
+      "name": "Video Frame Interpolation",
+      "input": { "primary": "video", "secondary": [] },
+      "output": { "primary": "video", "audio": true, "audioType": "original" },
+      "characteristics": { "processType": "enhancement", "transformationTypes": ["motion-modification"], "preserveAudio": true, "audioHandling": "passthrough", "modification": "enhancement" },
+      "metadata": {
+        "maturityLevel": "mature",
+        "commonUseCases": ["Frame rate increase", "Slow motion creation", "Smooth motion"],
+        "platforms": ["Topaz", "Replicate"],
+        "exampleModels": ["RIFE", "DAIN", "Flowframes"]
+      }
+    },
+    {
+      "id": "vid-to-vid-colorization",
+      "name": "Video Colorization",
+      "input": { "primary": "video", "secondary": [] },
+      "output": { "primary": "video", "audio": true, "audioType": "original" },
+      "characteristics": { "processType": "transformation", "transformationTypes": ["enhancement"], "preserveAudio": true, "audioHandling": "passthrough", "modification": "enhancement" },
+      "metadata": {
+        "maturityLevel": "emerging",
+        "commonUseCases": ["Black and white restoration", "Historical footage colorization", "Archival restoration"],
+        "platforms": ["Replicate", "DeOldify"],
+        "exampleModels": ["DeOldify", "Video Colorization"]
+      }
+    },
+    {
+      "id": "vid-to-vid-deepfake",
+      "name": "Video Face Swap",
+      "input": { "primary": "video", "secondary": ["image"] },
+      "output": { "primary": "video", "audio": true, "audioType": "original" },
+      "characteristics": { "processType": "transformation", "transformationTypes": ["object-editing"], "preserveAudio": true, "audioHandling": "passthrough", "modification": "selective-editing" },
+      "metadata": {
+        "maturityLevel": "mature",
+        "commonUseCases": ["Face replacement", "Character substitution", "Visual effects"],
+        "platforms": ["Replicate", "DeepFaceLab"],
+        "exampleModels": ["DeepFaceLab", "Roop", "FaceSwap"]
+      }
+    },
+    {
+      "id": "vid-to-vid-relighting",
+      "name": "Video Relighting",
+      "input": { "primary": "video", "secondary": ["text"] },
+      "output": { "primary": "video", "audio": true, "audioType": "original" },
+      "characteristics": { "processType": "transformation", "transformationTypes": ["enhancement"], "preserveAudio": true, "audioHandling": "passthrough", "modification": "enhancement" },
+      "metadata": {
+        "maturityLevel": "experimental",
+        "commonUseCases": ["Lighting adjustment", "Time of day change", "Mood alteration"],
+        "platforms": ["Experimental"],
+        "exampleModels": []
+      }
+    },
+    {
+      "id": "vid-to-vid-segmentation",
+      "name": "Video Segmentation",
+      "input": { "primary": "video", "secondary": ["text"] },
+      "output": { "primary": "video", "audio": true, "audioType": "original" },
+      "characteristics": { "processType": "transformation", "transformationTypes": ["object-editing"], "preserveAudio": true, "audioHandling": "passthrough", "modification": "selective-editing" },
+      "metadata": {
+        "maturityLevel": "emerging",
+        "commonUseCases": ["Background removal", "Object isolation", "Green screen replacement"],
+        "platforms": ["Replicate", "Runway", "Unscreen"],
+        "exampleModels": ["Segment Anything Video", "XMem", "Cutout.pro"]
+      }
    }
  ]
}
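All of the video-editing entries above declare audio passthrough both in characteristics (preserveAudio, audioHandling) and in the output block (audio true with audioType "original"), so that invariant is easy to lint for. A minimal consistency check, assuming the layout shown above; the function name is illustrative:

import json

def check_audio_passthrough(path):
    # Flag entries where the characteristics block and the output block disagree
    # about whether the original audio track is carried through unchanged.
    with open(path, encoding="utf-8") as f:
        doc = json.load(f)
    mismatches = []
    for entry in doc["modalities"]:
        chars, output = entry.get("characteristics", {}), entry.get("output", {})
        claims_passthrough = chars.get("preserveAudio") is True or chars.get("audioHandling") == "passthrough"
        declares_original = output.get("audio") is True and output.get("audioType") == "original"
        if claims_passthrough != declares_original:
            mismatches.append(entry["id"])
    return mismatches

# e.g. check_audio_passthrough("taxonomy/video-generation/editing/modalities.json") -> []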