openfree committed on
Commit
c546294
1 Parent(s): 80e15e8

Update app.py

Files changed (1)
  1. app.py +1 -948
app.py CHANGED
@@ -1,949 +1,2 @@
1
- import gradio as gr
2
- import requests
3
- import pandas as pd
4
- import plotly.graph_objects as go
5
- from datetime import datetime
6
  import os
7
-
8
- HF_TOKEN = os.getenv("HF_TOKEN")
9
-
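- # Models to track: maps each model ID to its Hugging Face URL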
10
- target_models = {
11
- "openfree/flux-lora-korea-palace": "https://huggingface.co/openfree/flux-lora-korea-palace",
12
- "seawolf2357/hanbok": "https://huggingface.co/seawolf2357/hanbok",
13
- "LGAI-EXAONE/EXAONE-3.5-32B-Instruct": "https://huggingface.co/LGAI-EXAONE/EXAONE-3.5-32B-Instruct",
14
- "LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct": "https://huggingface.co/LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct",
15
- "LGAI-EXAONE/EXAONE-3.5-7.8B-Instruct": "https://huggingface.co/LGAI-EXAONE/EXAONE-3.5-7.8B-Instruct",
16
- "ginipick/flux-lora-eric-cat": "https://huggingface.co/ginipick/flux-lora-eric-cat",
17
- "seawolf2357/flux-lora-car-rolls-royce": "https://huggingface.co/seawolf2357/flux-lora-car-rolls-royce",
18
-
19
- "moreh/Llama-3-Motif-102B-Instruct": "https://huggingface.co/moreh/Llama-3-Motif-102B-Instruct",
20
- "moreh/Llama-3-Motif-102B": "https://huggingface.co/moreh/Llama-3-Motif-102B",
21
- "Samsung/TinyClick": "https://huggingface.co/Samsung/TinyClick",
22
-
23
- "Saxo/Linkbricks-Horizon-AI-Korean-Gemma-2-sft-dpo-27B": "https://huggingface.co/Saxo/Linkbricks-Horizon-AI-Korean-Gemma-2-sft-dpo-27B",
24
- "AALF/gemma-2-27b-it-SimPO-37K": "https://huggingface.co/AALF/gemma-2-27b-it-SimPO-37K",
25
- "nbeerbower/mistral-nemo-wissenschaft-12B": "https://huggingface.co/nbeerbower/mistral-nemo-wissenschaft-12B",
26
- "Saxo/Linkbricks-Horizon-AI-Korean-Mistral-Nemo-sft-dpo-12B": "https://huggingface.co/Saxo/Linkbricks-Horizon-AI-Korean-Mistral-Nemo-sft-dpo-12B",
27
- "princeton-nlp/gemma-2-9b-it-SimPO": "https://huggingface.co/princeton-nlp/gemma-2-9b-it-SimPO",
28
- "migtissera/Tess-v2.5-Gemma-2-27B-alpha": "https://huggingface.co/migtissera/Tess-v2.5-Gemma-2-27B-alpha",
29
- "DeepMount00/Llama-3.1-8b-Ita": "https://huggingface.co/DeepMount00/Llama-3.1-8b-Ita",
30
- "cognitivecomputations/dolphin-2.9.3-mistral-nemo-12b": "https://huggingface.co/cognitivecomputations/dolphin-2.9.3-mistral-nemo-12b",
31
- "ai-human-lab/EEVE-Korean_Instruct-10.8B-expo": "https://huggingface.co/ai-human-lab/EEVE-Korean_Instruct-10.8B-expo",
32
- "VAGOsolutions/Llama-3.1-SauerkrautLM-8b-Instruct": "https://huggingface.co/VAGOsolutions/Llama-3.1-SauerkrautLM-8b-Instruct",
33
- "Saxo/Linkbricks-Horizon-AI-Korean-llama-3.1-sft-dpo-8B": "https://huggingface.co/Saxo/Linkbricks-Horizon-AI-Korean-llama-3.1-sft-dpo-8B",
34
- "AIDX-ktds/ktdsbaseLM-v0.12-based-on-openchat3.5": "https://huggingface.co/AIDX-ktds/ktdsbaseLM-v0.12-based-on-openchat3.5",
35
- "mlabonne/Daredevil-8B-abliterated": "https://huggingface.co/mlabonne/Daredevil-8B-abliterated",
36
- "ENERGY-DRINK-LOVE/eeve_dpo-v3": "https://huggingface.co/ENERGY-DRINK-LOVE/eeve_dpo-v3",
37
- "migtissera/Trinity-2-Codestral-22B": "https://huggingface.co/migtissera/Trinity-2-Codestral-22B",
38
- "Saxo/Linkbricks-Horizon-AI-Korean-llama3.1-sft-rlhf-dpo-8B": "https://huggingface.co/Saxo/Linkbricks-Horizon-AI-Korean-llama3.1-sft-rlhf-dpo-8B",
39
- "mlabonne/Daredevil-8B-abliterated-dpomix": "https://huggingface.co/mlabonne/Daredevil-8B-abliterated-dpomix",
40
- "yanolja/EEVE-Korean-Instruct-10.8B-v1.0": "https://huggingface.co/yanolja/EEVE-Korean-Instruct-10.8B-v1.0",
41
- "vicgalle/Configurable-Llama-3.1-8B-Instruct": "https://huggingface.co/vicgalle/Configurable-Llama-3.1-8B-Instruct",
42
- "T3Q-LLM/T3Q-LLM1-sft1.0-dpo1.0": "https://huggingface.co/T3Q-LLM/T3Q-LLM1-sft1.0-dpo1.0",
43
- "Eurdem/Defne-llama3.1-8B": "https://huggingface.co/Eurdem/Defne-llama3.1-8B",
44
- "BAAI/Infinity-Instruct-7M-Gen-Llama3_1-8B": "https://huggingface.co/BAAI/Infinity-Instruct-7M-Gen-Llama3_1-8B",
45
- "BAAI/Infinity-Instruct-3M-0625-Llama3-8B": "https://huggingface.co/BAAI/Infinity-Instruct-3M-0625-Llama3-8B",
46
- "T3Q-LLM/T3Q-LLM-sft1.0-dpo1.0": "https://huggingface.co/T3Q-LLM/T3Q-LLM-sft1.0-dpo1.0",
47
- "BAAI/Infinity-Instruct-7M-0729-Llama3_1-8B": "https://huggingface.co/BAAI/Infinity-Instruct-7M-0729-Llama3_1-8B",
48
- "mightbe/EEVE-10.8B-Multiturn": "https://huggingface.co/mightbe/EEVE-10.8B-Multiturn",
49
- "hyemijo/omed-llama3.1-8b": "https://huggingface.co/hyemijo/omed-llama3.1-8b",
50
- "yanolja/Bookworm-10.7B-v0.4-DPO": "https://huggingface.co/yanolja/Bookworm-10.7B-v0.4-DPO",
51
- "algograp-Inc/algograpV4": "https://huggingface.co/algograp-Inc/algograpV4",
52
- "lightblue/suzume-llama-3-8B-multilingual-orpo-borda-top75": "https://huggingface.co/lightblue/suzume-llama-3-8B-multilingual-orpo-borda-top75",
53
- "chihoonlee10/T3Q-LLM-MG-DPO-v1.0": "https://huggingface.co/chihoonlee10/T3Q-LLM-MG-DPO-v1.0",
54
- "vicgalle/Configurable-Hermes-2-Pro-Llama-3-8B": "https://huggingface.co/vicgalle/Configurable-Hermes-2-Pro-Llama-3-8B",
55
- "RLHFlow/LLaMA3-iterative-DPO-final": "https://huggingface.co/RLHFlow/LLaMA3-iterative-DPO-final",
56
- "SEOKDONG/llama3.1_korean_v0.1_sft_by_aidx": "https://huggingface.co/SEOKDONG/llama3.1_korean_v0.1_sft_by_aidx",
57
- "spow12/Ko-Qwen2-7B-Instruct": "https://huggingface.co/spow12/Ko-Qwen2-7B-Instruct",
58
- "BAAI/Infinity-Instruct-3M-0625-Qwen2-7B": "https://huggingface.co/BAAI/Infinity-Instruct-3M-0625-Qwen2-7B",
59
- "lightblue/suzume-llama-3-8B-multilingual-orpo-borda-half": "https://huggingface.co/lightblue/suzume-llama-3-8B-multilingual-orpo-borda-half",
60
- "T3Q-LLM/T3Q-LLM1-CV-v2.0": "https://huggingface.co/T3Q-LLM/T3Q-LLM1-CV-v2.0",
61
- "migtissera/Trinity-2-Codestral-22B-v0.2": "https://huggingface.co/migtissera/Trinity-2-Codestral-22B-v0.2",
62
- "sinjy1203/EEVE-Korean-Instruct-10.8B-v1.0-Grade-Retrieval": "https://huggingface.co/sinjy1203/EEVE-Korean-Instruct-10.8B-v1.0-Grade-Retrieval",
63
- "MaziyarPanahi/Llama-3-8B-Instruct-v0.10": "https://huggingface.co/MaziyarPanahi/Llama-3-8B-Instruct-v0.10",
64
- "MaziyarPanahi/Llama-3-8B-Instruct-v0.9": "https://huggingface.co/MaziyarPanahi/Llama-3-8B-Instruct-v0.9",
65
- "zhengr/MixTAO-7Bx2-MoE-v8.1": "https://huggingface.co/zhengr/MixTAO-7Bx2-MoE-v8.1",
66
- "TIGER-Lab/MAmmoTH2-8B-Plus": "https://huggingface.co/TIGER-Lab/MAmmoTH2-8B-Plus",
67
- "OpenBuddy/openbuddy-qwen1.5-14b-v21.1-32k": "https://huggingface.co/OpenBuddy/openbuddy-qwen1.5-14b-v21.1-32k",
68
- "haoranxu/Llama-3-Instruct-8B-CPO-SimPO": "https://huggingface.co/haoranxu/Llama-3-Instruct-8B-CPO-SimPO",
69
- "Weyaxi/Einstein-v7-Qwen2-7B": "https://huggingface.co/Weyaxi/Einstein-v7-Qwen2-7B",
70
- "DKYoon/kosolar-hermes-test": "https://huggingface.co/DKYoon/kosolar-hermes-test",
71
- "vilm/Quyen-Pro-v0.1": "https://huggingface.co/vilm/Quyen-Pro-v0.1",
72
- "chihoonlee10/T3Q-LLM-MG-v1.0": "https://huggingface.co/chihoonlee10/T3Q-LLM-MG-v1.0",
73
- "lightblue/suzume-llama-3-8B-multilingual-orpo-borda-top25": "https://huggingface.co/lightblue/suzume-llama-3-8B-multilingual-orpo-borda-top25",
74
- "ai-human-lab/EEVE-Korean-10.8B-RAFT": "https://huggingface.co/ai-human-lab/EEVE-Korean-10.8B-RAFT",
75
- "princeton-nlp/Llama-3-Base-8B-SFT-RDPO": "https://huggingface.co/princeton-nlp/Llama-3-Base-8B-SFT-RDPO",
76
- "MaziyarPanahi/Llama-3-8B-Instruct-v0.8": "https://huggingface.co/MaziyarPanahi/Llama-3-8B-Instruct-v0.8",
77
- "chihoonlee10/T3Q-ko-solar-dpo-v7.0": "https://huggingface.co/chihoonlee10/T3Q-ko-solar-dpo-v7.0",
78
- "jondurbin/bagel-8b-v1.0": "https://huggingface.co/jondurbin/bagel-8b-v1.0",
79
- "DeepMount00/Llama-3-8b-Ita": "https://huggingface.co/DeepMount00/Llama-3-8b-Ita",
80
- "VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct": "https://huggingface.co/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct",
81
- "princeton-nlp/Llama-3-Instruct-8B-ORPO-v0.2": "https://huggingface.co/princeton-nlp/Llama-3-Instruct-8B-ORPO-v0.2",
82
- "AIDX-ktds/ktdsbaseLM-v0.11-based-on-openchat3.5": "https://huggingface.co/AIDX-ktds/ktdsbaseLM-v0.11-based-on-openchat3.5",
83
- "princeton-nlp/Llama-3-Base-8B-SFT-KTO": "https://huggingface.co/princeton-nlp/Llama-3-Base-8B-SFT-KTO",
84
- "maywell/Mini_Synatra_SFT": "https://huggingface.co/maywell/Mini_Synatra_SFT",
85
- "princeton-nlp/Llama-3-Base-8B-SFT-ORPO": "https://huggingface.co/princeton-nlp/Llama-3-Base-8B-SFT-ORPO",
86
- "princeton-nlp/Llama-3-Instruct-8B-CPO-v0.2": "https://huggingface.co/princeton-nlp/Llama-3-Instruct-8B-CPO-v0.2",
87
- "spow12/Qwen2-7B-ko-Instruct-orpo-ver_2.0_wo_chat": "https://huggingface.co/spow12/Qwen2-7B-ko-Instruct-orpo-ver_2.0_wo_chat",
88
- "princeton-nlp/Llama-3-Base-8B-SFT-DPO": "https://huggingface.co/princeton-nlp/Llama-3-Base-8B-SFT-DPO",
89
- "princeton-nlp/Llama-3-Instruct-8B-ORPO": "https://huggingface.co/princeton-nlp/Llama-3-Instruct-8B-ORPO",
90
- "lcw99/llama-3-10b-it-kor-extented-chang": "https://huggingface.co/lcw99/llama-3-10b-it-kor-extented-chang",
91
- "migtissera/Llama-3-8B-Synthia-v3.5": "https://huggingface.co/migtissera/Llama-3-8B-Synthia-v3.5",
92
- "megastudyedu/M-SOLAR-10.7B-v1.4-dpo": "https://huggingface.co/megastudyedu/M-SOLAR-10.7B-v1.4-dpo",
93
- "T3Q-LLM/T3Q-LLM-solar10.8-sft-v1.0": "https://huggingface.co/T3Q-LLM/T3Q-LLM-solar10.8-sft-v1.0",
94
- "maywell/Synatra-10.7B-v0.4": "https://huggingface.co/maywell/Synatra-10.7B-v0.4",
95
- "nlpai-lab/KULLM3": "https://huggingface.co/nlpai-lab/KULLM3",
96
- "abacusai/Llama-3-Smaug-8B": "https://huggingface.co/abacusai/Llama-3-Smaug-8B",
97
- "gwonny/nox-solar-10.7b-v4-kolon-ITD-5-v2.1": "https://huggingface.co/gwonny/nox-solar-10.7b-v4-kolon-ITD-5-v2.1",
98
- "BAAI/Infinity-Instruct-3M-0625-Mistral-7B": "https://huggingface.co/BAAI/Infinity-Instruct-3M-0625-Mistral-7B",
99
- "openchat/openchat_3.5": "https://huggingface.co/openchat/openchat_3.5",
100
- "T3Q-LLM/T3Q-LLM1-v2.0": "https://huggingface.co/T3Q-LLM/T3Q-LLM1-v2.0",
101
- "T3Q-LLM/T3Q-LLM1-CV-v1.0": "https://huggingface.co/T3Q-LLM/T3Q-LLM1-CV-v1.0",
102
- "ONS-AI-RESEARCH/ONS-SOLAR-10.7B-v1.1": "https://huggingface.co/ONS-AI-RESEARCH/ONS-SOLAR-10.7B-v1.1",
103
- "macadeliccc/Samantha-Qwen-2-7B": "https://huggingface.co/macadeliccc/Samantha-Qwen-2-7B",
104
- "openchat/openchat-3.5-0106": "https://huggingface.co/openchat/openchat-3.5-0106",
105
- "NousResearch/Nous-Hermes-2-SOLAR-10.7B": "https://huggingface.co/NousResearch/Nous-Hermes-2-SOLAR-10.7B",
106
- "UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter1": "https://huggingface.co/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter1",
107
- "MTSAIR/multi_verse_model": "https://huggingface.co/MTSAIR/multi_verse_model",
108
- "gwonny/nox-solar-10.7b-v4-kolon-ITD-5-v2.0": "https://huggingface.co/gwonny/nox-solar-10.7b-v4-kolon-ITD-5-v2.0",
109
- "VIRNECT/llama-3-Korean-8B": "https://huggingface.co/VIRNECT/llama-3-Korean-8B",
110
- "ENERGY-DRINK-LOVE/SOLAR_merge_DPOv3": "https://huggingface.co/ENERGY-DRINK-LOVE/SOLAR_merge_DPOv3",
111
- "SeaLLMs/SeaLLMs-v3-7B-Chat": "https://huggingface.co/SeaLLMs/SeaLLMs-v3-7B-Chat",
112
- "VIRNECT/llama-3-Korean-8B-V2": "https://huggingface.co/VIRNECT/llama-3-Korean-8B-V2",
113
- "MLP-KTLim/llama-3-Korean-Bllossom-8B": "https://huggingface.co/MLP-KTLim/llama-3-Korean-Bllossom-8B",
114
- "Magpie-Align/Llama-3-8B-Magpie-Align-v0.3": "https://huggingface.co/Magpie-Align/Llama-3-8B-Magpie-Align-v0.3",
115
- "cognitivecomputations/Llama-3-8B-Instruct-abliterated-v2": "https://huggingface.co/cognitivecomputations/Llama-3-8B-Instruct-abliterated-v2",
116
- "SkyOrbis/SKY-Ko-Llama3-8B-lora": "https://huggingface.co/SkyOrbis/SKY-Ko-Llama3-8B-lora",
117
- "4yo1/llama3-eng-ko-8b-sl5": "https://huggingface.co/4yo1/llama3-eng-ko-8b-sl5",
118
- "kimwooglae/WebSquareAI-Instruct-llama-3-8B-v0.5.39": "https://huggingface.co/kimwooglae/WebSquareAI-Instruct-llama-3-8B-v0.5.39",
119
- "ONS-AI-RESEARCH/ONS-SOLAR-10.7B-v1.2": "https://huggingface.co/ONS-AI-RESEARCH/ONS-SOLAR-10.7B-v1.2",
120
- "lcw99/llama-3-10b-it-kor-extented-chang-pro8": "https://huggingface.co/lcw99/llama-3-10b-it-kor-extented-chang-pro8",
121
- "BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B": "https://huggingface.co/BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B",
122
- "migtissera/Tess-2.0-Llama-3-8B": "https://huggingface.co/migtissera/Tess-2.0-Llama-3-8B",
123
- "BAAI/Infinity-Instruct-3M-0613-Mistral-7B": "https://huggingface.co/BAAI/Infinity-Instruct-3M-0613-Mistral-7B",
124
- "yeonwoo780/cydinfo-llama3-8b-lora-v01": "https://huggingface.co/yeonwoo780/cydinfo-llama3-8b-lora-v01",
125
- "vicgalle/ConfigurableSOLAR-10.7B": "https://huggingface.co/vicgalle/ConfigurableSOLAR-10.7B",
126
- "chihoonlee10/T3Q-ko-solar-jo-v1.0": "https://huggingface.co/chihoonlee10/T3Q-ko-solar-jo-v1.0",
127
- "Kukedlc/NeuralLLaMa-3-8b-ORPO-v0.4": "https://huggingface.co/Kukedlc/NeuralLLaMa-3-8b-ORPO-v0.4",
128
- "Edentns/DataVortexS-10.7B-dpo-v1.0": "https://huggingface.co/Edentns/DataVortexS-10.7B-dpo-v1.0",
129
- "SJ-Donald/SJ-SOLAR-10.7b-DPO": "https://huggingface.co/SJ-Donald/SJ-SOLAR-10.7b-DPO",
130
- "lemon-mint/gemma-ko-7b-it-v0.40": "https://huggingface.co/lemon-mint/gemma-ko-7b-it-v0.40",
131
- "GyuHyeonWkdWkdMan/naps-llama-3.1-8b-instruct-v0.3": "https://huggingface.co/GyuHyeonWkdWkdMan/naps-llama-3.1-8b-instruct-v0.3",
132
- "hyeogi/SOLAR-10.7B-v1.5": "https://huggingface.co/hyeogi/SOLAR-10.7B-v1.5",
133
- "etri-xainlp/llama3-8b-dpo_v1": "https://huggingface.co/etri-xainlp/llama3-8b-dpo_v1",
134
- "LDCC/LDCC-SOLAR-10.7B": "https://huggingface.co/LDCC/LDCC-SOLAR-10.7B",
135
- "chlee10/T3Q-Llama3-8B-Inst-sft1.0": "https://huggingface.co/chlee10/T3Q-Llama3-8B-Inst-sft1.0",
136
- "lemon-mint/gemma-ko-7b-it-v0.41": "https://huggingface.co/lemon-mint/gemma-ko-7b-it-v0.41",
137
- "chlee10/T3Q-Llama3-8B-sft1.0-dpo1.0": "https://huggingface.co/chlee10/T3Q-Llama3-8B-sft1.0-dpo1.0",
138
- "maywell/Synatra-7B-Instruct-v0.3-pre": "https://huggingface.co/maywell/Synatra-7B-Instruct-v0.3-pre",
139
- "UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter2": "https://huggingface.co/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter2",
140
- "hwkwon/S-SOLAR-10.7B-v1.4": "https://huggingface.co/hwkwon/S-SOLAR-10.7B-v1.4",
141
- "12thD/ko-Llama-3-8B-sft-v0.3": "https://huggingface.co/12thD/ko-Llama-3-8B-sft-v0.3",
142
- "hkss/hk-SOLAR-10.7B-v1.4": "https://huggingface.co/hkss/hk-SOLAR-10.7B-v1.4",
143
- "lookuss/test-llilu": "https://huggingface.co/lookuss/test-llilu",
144
- "chihoonlee10/T3Q-ko-solar-dpo-v3.0": "https://huggingface.co/chihoonlee10/T3Q-ko-solar-dpo-v3.0",
145
- "chihoonlee10/T3Q-ko-solar-dpo-v1.0": "https://huggingface.co/chihoonlee10/T3Q-ko-solar-dpo-v1.0",
146
- "lcw99/llama-3-10b-wiki-240709-f": "https://huggingface.co/lcw99/llama-3-10b-wiki-240709-f",
147
- "Edentns/DataVortexS-10.7B-v0.4": "https://huggingface.co/Edentns/DataVortexS-10.7B-v0.4",
148
- "princeton-nlp/Llama-3-Instruct-8B-KTO": "https://huggingface.co/princeton-nlp/Llama-3-Instruct-8B-KTO",
149
- "spow12/kosolar_4.1_sft": "https://huggingface.co/spow12/kosolar_4.1_sft",
150
- "natong19/Qwen2-7B-Instruct-abliterated": "https://huggingface.co/natong19/Qwen2-7B-Instruct-abliterated",
151
- "megastudyedu/ME-dpo-7B-v1.1": "https://huggingface.co/megastudyedu/ME-dpo-7B-v1.1",
152
- "01-ai/Yi-1.5-9B-Chat-16K": "https://huggingface.co/01-ai/Yi-1.5-9B-Chat-16K",
153
- "Edentns/DataVortexS-10.7B-dpo-v0.1": "https://huggingface.co/Edentns/DataVortexS-10.7B-dpo-v0.1",
154
- "Alphacode-AI/AlphaMist7B-slr-v4-slow": "https://huggingface.co/Alphacode-AI/AlphaMist7B-slr-v4-slow",
155
- "chihoonlee10/T3Q-ko-solar-sft-dpo-v1.0": "https://huggingface.co/chihoonlee10/T3Q-ko-solar-sft-dpo-v1.0",
156
- "hwkwon/S-SOLAR-10.7B-v1.1": "https://huggingface.co/hwkwon/S-SOLAR-10.7B-v1.1",
157
- "DopeorNope/Dear_My_best_Friends-13B": "https://huggingface.co/DopeorNope/Dear_My_best_Friends-13B",
158
- "GyuHyeonWkdWkdMan/NAPS-llama-3.1-8b-instruct-v0.3.2": "https://huggingface.co/GyuHyeonWkdWkdMan/NAPS-llama-3.1-8b-instruct-v0.3.2",
159
- "PathFinderKR/Waktaverse-Llama-3-KO-8B-Instruct": "https://huggingface.co/PathFinderKR/Waktaverse-Llama-3-KO-8B-Instruct",
160
- "vicgalle/ConfigurableHermes-7B": "https://huggingface.co/vicgalle/ConfigurableHermes-7B",
161
- "maywell/PiVoT-10.7B-Mistral-v0.2": "https://huggingface.co/maywell/PiVoT-10.7B-Mistral-v0.2",
162
- "failspy/Meta-Llama-3-8B-Instruct-abliterated-v3": "https://huggingface.co/failspy/Meta-Llama-3-8B-Instruct-abliterated-v3",
163
- "lemon-mint/gemma-ko-7b-instruct-v0.50": "https://huggingface.co/lemon-mint/gemma-ko-7b-instruct-v0.50",
164
- "ENERGY-DRINK-LOVE/leaderboard_inst_v1.3_Open-Hermes_LDCC-SOLAR-10.7B_SFT": "https://huggingface.co/ENERGY-DRINK-LOVE/leaderboard_inst_v1.3_Open-Hermes_LDCC-SOLAR-10.7B_SFT",
165
- "maywell/PiVoT-0.1-early": "https://huggingface.co/maywell/PiVoT-0.1-early",
166
- "hwkwon/S-SOLAR-10.7B-v1.3": "https://huggingface.co/hwkwon/S-SOLAR-10.7B-v1.3",
167
- "werty1248/Llama-3-Ko-8B-Instruct-AOG": "https://huggingface.co/werty1248/Llama-3-Ko-8B-Instruct-AOG",
168
- "Alphacode-AI/AlphaMist7B-slr-v2": "https://huggingface.co/Alphacode-AI/AlphaMist7B-slr-v2",
169
- "maywell/koOpenChat-sft": "https://huggingface.co/maywell/koOpenChat-sft",
170
- "lemon-mint/gemma-7b-openhermes-v0.80": "https://huggingface.co/lemon-mint/gemma-7b-openhermes-v0.80",
171
- "VIRNECT/llama-3-Korean-8B-r-v1": "https://huggingface.co/VIRNECT/llama-3-Korean-8B-r-v1",
172
- "Alphacode-AI/AlphaMist7B-slr-v1": "https://huggingface.co/Alphacode-AI/AlphaMist7B-slr-v1",
173
- "Loyola/Mistral-7b-ITmodel": "https://huggingface.co/Loyola/Mistral-7b-ITmodel",
174
- "VIRNECT/llama-3-Korean-8B-r-v2": "https://huggingface.co/VIRNECT/llama-3-Korean-8B-r-v2",
175
- "NLPark/AnFeng_v3.1-Avocet": "https://huggingface.co/NLPark/AnFeng_v3.1-Avocet",
176
- "maywell/Synatra_TbST11B_EP01": "https://huggingface.co/maywell/Synatra_TbST11B_EP01",
177
- "GritLM/GritLM-7B-KTO": "https://huggingface.co/GritLM/GritLM-7B-KTO",
178
- "01-ai/Yi-34B-Chat": "https://huggingface.co/01-ai/Yi-34B-Chat",
179
- "ValiantLabs/Llama3.1-8B-ShiningValiant2": "https://huggingface.co/ValiantLabs/Llama3.1-8B-ShiningValiant2",
180
- "princeton-nlp/Llama-3-Base-8B-SFT-CPO": "https://huggingface.co/princeton-nlp/Llama-3-Base-8B-SFT-CPO",
181
- "hyokwan/hkcode_llama3_8b": "https://huggingface.co/hyokwan/hkcode_llama3_8b",
182
- "UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter3": "https://huggingface.co/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter3",
183
- "yuntaeyang/SOLAR-10.7B-Instructlora_sftt-v1.0": "https://huggingface.co/yuntaeyang/SOLAR-10.7B-Instructlora_sftt-v1.0",
184
- "juungwon/Llama-3-cs-LoRA": "https://huggingface.co/juungwon/Llama-3-cs-LoRA",
185
- "gangyeolkim/llama-3-chat": "https://huggingface.co/gangyeolkim/llama-3-chat",
186
- "mncai/llama2-13b-dpo-v3": "https://huggingface.co/mncai/llama2-13b-dpo-v3",
187
- "maywell/Synatra-Zephyr-7B-v0.01": "https://huggingface.co/maywell/Synatra-Zephyr-7B-v0.01",
188
- "ENERGY-DRINK-LOVE/leaderboard_inst_v1.3_deup_LDCC-SOLAR-10.7B_SFT": "https://huggingface.co/ENERGY-DRINK-LOVE/leaderboard_inst_v1.3_deup_LDCC-SOLAR-10.7B_SFT",
189
- "juungwon/Llama-3-constructionsafety-LoRA": "https://huggingface.co/juungwon/Llama-3-constructionsafety-LoRA",
190
- "princeton-nlp/Mistral-7B-Base-SFT-SimPO": "https://huggingface.co/princeton-nlp/Mistral-7B-Base-SFT-SimPO",
191
- "moondriller/solar10B-eugeneparkthebestv2": "https://huggingface.co/moondriller/solar10B-eugeneparkthebestv2",
192
- "chlee10/T3Q-LLM3-Llama3-sft1.0-dpo1.0": "https://huggingface.co/chlee10/T3Q-LLM3-Llama3-sft1.0-dpo1.0",
193
- "Edentns/DataVortexS-10.7B-dpo-v1.7": "https://huggingface.co/Edentns/DataVortexS-10.7B-dpo-v1.7",
194
- "gamzadole/llama3_instruct_tuning_without_pretraing": "https://huggingface.co/gamzadole/llama3_instruct_tuning_without_pretraing",
195
- "saltlux/Ko-Llama3-Luxia-8B": "https://huggingface.co/saltlux/Ko-Llama3-Luxia-8B",
196
- "kimdeokgi/ko-pt-model-test1": "https://huggingface.co/kimdeokgi/ko-pt-model-test1",
197
- "maywell/Synatra-11B-Testbench-2": "https://huggingface.co/maywell/Synatra-11B-Testbench-2",
198
- "Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO": "https://huggingface.co/Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO",
199
- "vicgalle/Configurable-Mistral-7B": "https://huggingface.co/vicgalle/Configurable-Mistral-7B",
200
- "ENERGY-DRINK-LOVE/leaderboard_inst_v1.5_LDCC-SOLAR-10.7B_SFT": "https://huggingface.co/ENERGY-DRINK-LOVE/leaderboard_inst_v1.5_LDCC-SOLAR-10.7B_SFT",
201
- "beomi/Llama-3-Open-Ko-8B-Instruct-preview": "https://huggingface.co/beomi/Llama-3-Open-Ko-8B-Instruct-preview",
202
- "Edentns/DataVortexS-10.7B-dpo-v1.3": "https://huggingface.co/Edentns/DataVortexS-10.7B-dpo-v1.3",
203
- "spow12/Llama3_ko_4.2_sft": "https://huggingface.co/spow12/Llama3_ko_4.2_sft",
204
- "maywell/Llama-3-Ko-8B-Instruct": "https://huggingface.co/maywell/Llama-3-Ko-8B-Instruct",
205
- "T3Q-LLM/T3Q-LLM3-NC-v1.0": "https://huggingface.co/T3Q-LLM/T3Q-LLM3-NC-v1.0",
206
- "ehartford/dolphin-2.2.1-mistral-7b": "https://huggingface.co/ehartford/dolphin-2.2.1-mistral-7b",
207
- "hwkwon/S-SOLAR-10.7B-SFT-v1.3": "https://huggingface.co/hwkwon/S-SOLAR-10.7B-SFT-v1.3",
208
- "sel303/llama3-instruct-diverce-v2.0": "https://huggingface.co/sel303/llama3-instruct-diverce-v2.0",
209
- "4yo1/llama3-eng-ko-8b-sl3": "https://huggingface.co/4yo1/llama3-eng-ko-8b-sl3",
210
- "hkss/hk-SOLAR-10.7B-v1.1": "https://huggingface.co/hkss/hk-SOLAR-10.7B-v1.1",
211
- "Open-Orca/Mistral-7B-OpenOrca": "https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca",
212
- "hyokwan/familidata": "https://huggingface.co/hyokwan/familidata",
213
- "uukuguy/zephyr-7b-alpha-dare-0.85": "https://huggingface.co/uukuguy/zephyr-7b-alpha-dare-0.85",
214
- "gwonny/nox-solar-10.7b-v4-kolon-all-5": "https://huggingface.co/gwonny/nox-solar-10.7b-v4-kolon-all-5",
215
- "shleeeee/mistral-ko-tech-science-v1": "https://huggingface.co/shleeeee/mistral-ko-tech-science-v1",
216
- "Deepnoid/deep-solar-eeve-KorSTS": "https://huggingface.co/Deepnoid/deep-solar-eeve-KorSTS",
217
- "AIdenU/Mistral-7B-v0.2-ko-Y24_v1.0": "https://huggingface.co/AIdenU/Mistral-7B-v0.2-ko-Y24_v1.0",
218
- "tlphams/gollm-tendency-45": "https://huggingface.co/tlphams/gollm-tendency-45",
219
- "realPCH/ko_solra_merge": "https://huggingface.co/realPCH/ko_solra_merge",
220
- "Cartinoe5930/original-KoRAE-13b": "https://huggingface.co/Cartinoe5930/original-KoRAE-13b",
221
- "GAI-LLM/Yi-Ko-6B-dpo-v5": "https://huggingface.co/GAI-LLM/Yi-Ko-6B-dpo-v5",
222
- "Minirecord/Mini_DPO_test02": "https://huggingface.co/Minirecord/Mini_DPO_test02",
223
- "AIJUUD/juud-Mistral-7B-dpo": "https://huggingface.co/AIJUUD/juud-Mistral-7B-dpo",
224
- "gwonny/nox-solar-10.7b-v4-kolon-all-10": "https://huggingface.co/gwonny/nox-solar-10.7b-v4-kolon-all-10",
225
- "jieunhan/TEST_MODEL": "https://huggingface.co/jieunhan/TEST_MODEL",
226
- "etri-xainlp/kor-llama2-13b-dpo": "https://huggingface.co/etri-xainlp/kor-llama2-13b-dpo",
227
- "ifuseok/yi-ko-playtus-instruct-v0.2": "https://huggingface.co/ifuseok/yi-ko-playtus-instruct-v0.2",
228
- "Cartinoe5930/original-KoRAE-13b-3ep": "https://huggingface.co/Cartinoe5930/original-KoRAE-13b-3ep",
229
- "Trofish/KULLM-RLHF": "https://huggingface.co/Trofish/KULLM-RLHF",
230
- "wkshin89/Yi-Ko-6B-Instruct-v1.0": "https://huggingface.co/wkshin89/Yi-Ko-6B-Instruct-v1.0",
231
- "momo/polyglot-ko-12.8b-Chat-QLoRA-Merge": "https://huggingface.co/momo/polyglot-ko-12.8b-Chat-QLoRA-Merge",
232
- "PracticeLLM/Custom-KoLLM-13B-v5": "https://huggingface.co/PracticeLLM/Custom-KoLLM-13B-v5",
233
- "BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B": "https://huggingface.co/BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B",
234
- "MRAIRR/minillama3_8b_all": "https://huggingface.co/MRAIRR/minillama3_8b_all",
235
- "failspy/Phi-3-medium-4k-instruct-abliterated-v3": "https://huggingface.co/failspy/Phi-3-medium-4k-instruct-abliterated-v3",
236
- "DILAB-HYU/koquality-polyglot-12.8b": "https://huggingface.co/DILAB-HYU/koquality-polyglot-12.8b",
237
- "kyujinpy/Korean-OpenOrca-v3": "https://huggingface.co/kyujinpy/Korean-OpenOrca-v3",
238
- "4yo1/llama3-eng-ko-8b": "https://huggingface.co/4yo1/llama3-eng-ko-8b",
239
- "4yo1/llama3-eng-ko-8": "https://huggingface.co/4yo1/llama3-eng-ko-8",
240
- "4yo1/llama3-eng-ko-8-llama": "https://huggingface.co/4yo1/llama3-eng-ko-8-llama",
241
- "PracticeLLM/Custom-KoLLM-13B-v2": "https://huggingface.co/PracticeLLM/Custom-KoLLM-13B-v2",
242
- "kyujinpy/KOR-Orca-Platypus-13B-v2": "https://huggingface.co/kyujinpy/KOR-Orca-Platypus-13B-v2",
243
- "ghost-x/ghost-7b-alpha": "https://huggingface.co/ghost-x/ghost-7b-alpha",
244
- "HumanF-MarkrAI/pub-llama-13B-v6": "https://huggingface.co/HumanF-MarkrAI/pub-llama-13B-v6",
245
- "nlpai-lab/kullm-polyglot-5.8b-v2": "https://huggingface.co/nlpai-lab/kullm-polyglot-5.8b-v2",
246
- "maywell/Synatra-42dot-1.3B": "https://huggingface.co/maywell/Synatra-42dot-1.3B",
247
- "yhkim9362/gemma-en-ko-7b-v0.1": "https://huggingface.co/yhkim9362/gemma-en-ko-7b-v0.1",
248
- "yhkim9362/gemma-en-ko-7b-v0.2": "https://huggingface.co/yhkim9362/gemma-en-ko-7b-v0.2",
249
- "daekeun-ml/Llama-2-ko-OpenOrca-gugugo-13B": "https://huggingface.co/daekeun-ml/Llama-2-ko-OpenOrca-gugugo-13B",
250
- "beomi/Yi-Ko-6B": "https://huggingface.co/beomi/Yi-Ko-6B",
251
- "jojo0217/ChatSKKU5.8B": "https://huggingface.co/jojo0217/ChatSKKU5.8B",
252
- "Deepnoid/deep-solar-v2.0.7": "https://huggingface.co/Deepnoid/deep-solar-v2.0.7",
253
- "01-ai/Yi-1.5-9B": "https://huggingface.co/01-ai/Yi-1.5-9B",
254
- "PracticeLLM/Custom-KoLLM-13B-v4": "https://huggingface.co/PracticeLLM/Custom-KoLLM-13B-v4",
255
- "nuebaek/komt_mistral_mss_user_0_max_steps_80": "https://huggingface.co/nuebaek/komt_mistral_mss_user_0_max_steps_80",
256
- "dltjdgh0928/lsh_finetune_v0.11": "https://huggingface.co/dltjdgh0928/lsh_finetune_v0.11",
257
- "shleeeee/mistral-7b-wiki": "https://huggingface.co/shleeeee/mistral-7b-wiki",
258
- "nayohan/polyglot-ko-5.8b-Inst": "https://huggingface.co/nayohan/polyglot-ko-5.8b-Inst",
259
- "ifuseok/sft-solar-10.7b-v1.1": "https://huggingface.co/ifuseok/sft-solar-10.7b-v1.1",
260
- "Junmai/KIT-5.8b": "https://huggingface.co/Junmai/KIT-5.8b",
261
- "heegyu/polyglot-ko-3.8b-chat": "https://huggingface.co/heegyu/polyglot-ko-3.8b-chat",
262
- "etri-xainlp/polyglot-ko-12.8b-instruct": "https://huggingface.co/etri-xainlp/polyglot-ko-12.8b-instruct",
263
- "OpenBuddy/openbuddy-mistral2-7b-v20.3-32k": "https://huggingface.co/OpenBuddy/openbuddy-mistral2-7b-v20.3-32k",
264
- "sh2orc/Llama-3-Korean-8B": "https://huggingface.co/sh2orc/Llama-3-Korean-8B",
265
- "Deepnoid/deep-solar-eeve-v2.0.0": "https://huggingface.co/Deepnoid/deep-solar-eeve-v2.0.0",
266
- "Herry443/Mistral-7B-KNUT-ref": "https://huggingface.co/Herry443/Mistral-7B-KNUT-ref",
267
- "heegyu/polyglot-ko-5.8b-chat": "https://huggingface.co/heegyu/polyglot-ko-5.8b-chat",
268
- "jungyuko/DAVinCI-42dot_LLM-PLM-1.3B-v1.5.3": "https://huggingface.co/jungyuko/DAVinCI-42dot_LLM-PLM-1.3B-v1.5.3",
269
- "DILAB-HYU/KoQuality-Polyglot-5.8b": "https://huggingface.co/DILAB-HYU/KoQuality-Polyglot-5.8b",
270
- "Byungchae/k2s3_test_0000": "https://huggingface.co/Byungchae/k2s3_test_0000",
271
- "migtissera/Tess-v2.5-Phi-3-medium-128k-14B": "https://huggingface.co/migtissera/Tess-v2.5-Phi-3-medium-128k-14B",
272
- "kyujinpy/Korean-OpenOrca-13B": "https://huggingface.co/kyujinpy/Korean-OpenOrca-13B",
273
- "kyujinpy/KO-Platypus2-13B": "https://huggingface.co/kyujinpy/KO-Platypus2-13B",
274
- "jin05102518/Astral-7B-Instruct-v0.01": "https://huggingface.co/jin05102518/Astral-7B-Instruct-v0.01",
275
- "Byungchae/k2s3_test_0002": "https://huggingface.co/Byungchae/k2s3_test_0002",
276
- "NousResearch/Nous-Hermes-llama-2-7b": "https://huggingface.co/NousResearch/Nous-Hermes-llama-2-7b",
277
- "kaist-ai/prometheus-13b-v1.0": "https://huggingface.co/kaist-ai/prometheus-13b-v1.0",
278
- "sel303/llama3-diverce-ver1.0": "https://huggingface.co/sel303/llama3-diverce-ver1.0",
279
- "NousResearch/Nous-Capybara-7B": "https://huggingface.co/NousResearch/Nous-Capybara-7B",
280
- "rrw-x2/KoSOLAR-10.7B-DPO-v1.0": "https://huggingface.co/rrw-x2/KoSOLAR-10.7B-DPO-v1.0",
281
- "Edentns/DataVortexS-10.7B-v0.2": "https://huggingface.co/Edentns/DataVortexS-10.7B-v0.2",
282
- "Jsoo/Llama3-beomi-Open-Ko-8B-Instruct-preview-test6": "https://huggingface.co/Jsoo/Llama3-beomi-Open-Ko-8B-Instruct-preview-test6",
283
- "tlphams/gollm-instruct-all-in-one-v1": "https://huggingface.co/tlphams/gollm-instruct-all-in-one-v1",
284
- "Edentns/DataVortexTL-1.1B-v0.1": "https://huggingface.co/Edentns/DataVortexTL-1.1B-v0.1",
285
- "richard-park/llama3-pre1-ds": "https://huggingface.co/richard-park/llama3-pre1-ds",
286
- "ehartford/samantha-1.1-llama-33b": "https://huggingface.co/ehartford/samantha-1.1-llama-33b",
287
- "heegyu/LIMA-13b-hf": "https://huggingface.co/heegyu/LIMA-13b-hf",
288
- "heegyu/42dot_LLM-PLM-1.3B-mt": "https://huggingface.co/heegyu/42dot_LLM-PLM-1.3B-mt",
289
- "shleeeee/mistral-ko-7b-wiki-neft": "https://huggingface.co/shleeeee/mistral-ko-7b-wiki-neft",
290
- "EleutherAI/polyglot-ko-1.3b": "https://huggingface.co/EleutherAI/polyglot-ko-1.3b",
291
- "kyujinpy/Ko-PlatYi-6B-gu": "https://huggingface.co/kyujinpy/Ko-PlatYi-6B-gu",
292
- "sel303/llama3-diverce-ver1.6": "https://huggingface.co/sel303/llama3-diverce-ver1.6"
293
- }
294
-
295
-
296
- def get_models_data(progress=gr.Progress()):
297
- """모델 데이터 가져오기"""
298
- def normalize_model_id(model_id):
299
- """모델 ID를 정규화"""
300
- return model_id.strip().lower()
301
-
302
- url = "https://huggingface.co/api/models"
303
-
304
- try:
305
- progress(0, desc="Fetching models data...")
306
- params = {
307
- 'full': 'true',
308
- 'limit': 3000, # increased to 3000
309
- 'sort': 'trending',
310
- 'direction': -1
311
- }
312
-
313
- headers = {'Accept': 'application/json'}
314
-
315
- response = requests.get(url, params=params, headers=headers)
316
- if response.status_code != 200:
317
- print(f"API 요청 실패: {response.status_code}")
318
- print(f"Response: {response.text}")
319
- return create_error_plot(), "<div>모델 데이터를 가져오는데 실패했습니다.</div>", pd.DataFrame()
320
-
321
- models = response.json()
322
-
323
- # Record each model's global rank from the API results
324
- model_ranks = {}
325
- model_data = {} # store detailed data for all models
326
-
327
- for idx, model in enumerate(models, 1):
328
- model_id = normalize_model_id(model.get('id', ''))
329
- model_data[model_id] = {
330
- 'rank': idx,
331
- 'downloads': model.get('downloads', 0),
332
- 'likes': model.get('likes', 0),
333
- 'title': model.get('title', 'No Title')
334
- }
335
-
336
- # Filter for target_models that appear in the rankings
337
- filtered_models = []
338
- for target_id in target_models.keys():
339
- normalized_target_id = normalize_model_id(target_id)
340
-
341
- # Check the global rankings first
342
- if normalized_target_id in model_data:
343
- model_info = {
344
- 'id': target_id,
345
- 'rank': model_data[normalized_target_id]['rank'],
346
- 'downloads': model_data[normalized_target_id]['downloads'],
347
- 'likes': model_data[normalized_target_id]['likes'],
348
- 'title': model_data[normalized_target_id]['title']
349
- }
350
- else:
351
- # For models outside the rankings, fetch details with an individual API call
352
- try:
353
- model_url = f"https://huggingface.co/api/models/{target_id}"
354
- model_response = requests.get(model_url, headers=headers)
355
- if model_response.status_code == 200:
356
- model_info = model_response.json()
357
- model_info['id'] = target_id
358
- model_info['rank'] = 'Not in top 3000'
359
- else:
360
- model_info = {
361
- 'id': target_id,
362
- 'rank': 'Not in top 3000',
363
- 'downloads': 0,
364
- 'likes': 0,
365
- 'title': 'No Title'
366
- }
367
- except Exception as e:
368
- print(f"Error fetching data for model {target_id}: {str(e)}")
369
- model_info = {
370
- 'id': target_id,
371
- 'rank': 'Not in top 3000',
372
- 'downloads': 0,
373
- 'likes': 0,
374
- 'title': 'No Title'
375
- }
376
-
377
- filtered_models.append(model_info)
378
-
379
- # Sort by rank (numeric ranks only)
380
- filtered_models.sort(key=lambda x: (
381
- float('inf') if x['rank'] == 'Not in top 3000' else x['rank']
382
- ))
383
-
384
- if not filtered_models:
385
- return create_error_plot(), "<div>선택된 모델의 데이터를 찾을 수 없습니다.</div>", pd.DataFrame()
386
-
387
- progress(0.3, desc="Creating visualization...")
388
-
389
- # Create the visualization
390
- fig = go.Figure()
391
-
392
- # Prepare the data
393
- ids = [model['id'] for model in filtered_models]
394
- ranks = [model['rank'] for model in filtered_models]
395
- likes = [model['likes'] for model in filtered_models]
396
- downloads = [model['downloads'] for model in filtered_models]
397
-
398
- # Invert Y-axis values (numeric ranks only)
399
- y_values = [3001 - r if isinstance(r, int) else 0 for r in ranks]
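- # Rank 1 becomes the tallest bar (3000); models outside the top 3000 get height 0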
400
-
401
- # Create the bar chart
402
- fig.add_trace(go.Bar(
403
- x=ids,
404
- y=y_values,
405
- text=[f"Global Rank: {r}<br>Likes: {l:,}<br>Downloads: {d:,}"
406
- for r, l, d in zip(ranks, likes, downloads)],
407
- textposition='auto',
408
- marker_color='rgb(158,202,225)',
409
- opacity=0.8
410
- ))
411
-
412
- fig.update_layout(
413
- title={
414
- 'text': 'Hugging Face Models Global Download Rankings (Top 3000)',
415
- 'y':0.95,
416
- 'x':0.5,
417
- 'xanchor': 'center',
418
- 'yanchor': 'top'
419
- },
420
- xaxis_title='Model ID',
421
- yaxis_title='Global Rank',
422
- yaxis=dict(
423
- ticktext=[str(i) for i in range(1, 3001, 150)],
424
- tickvals=[3001 - i for i in range(1, 3001, 150)],
425
- range=[0, 3000]
426
- ),
427
- height=800,
428
- showlegend=False,
429
- template='plotly_white',
430
- xaxis_tickangle=-45
431
- )
432
-
433
- progress(0.6, desc="Creating model cards...")
434
-
435
- # Build HTML cards
436
- html_content = """
437
- <div style='padding: 20px; background: #f5f5f5;'>
438
- <h2 style='color: #2c3e50;'>Models Global Download Rankings (Top 3000)</h2>
439
- <div style='display: grid; grid-template-columns: repeat(auto-fill, minmax(300px, 1fr)); gap: 20px;'>
440
- """
441
-
442
- # Build a card for each ranked model
443
- for model in filtered_models:
444
- model_id = model['id']
445
- rank = model['rank']
446
- likes = model.get('likes', 0)
447
- downloads = model.get('downloads', 0)
448
- title = model.get('title', 'No Title')
449
-
450
- html_content += f"""
451
- <div style='
452
- background: white;
453
- padding: 20px;
454
- border-radius: 10px;
455
- box-shadow: 0 2px 4px rgba(0,0,0,0.1);
456
- transition: transform 0.2s;
457
- '>
458
- <h3 style='color: #34495e;'>Global Rank #{rank} - {model_id}</h3>
459
- <p style='color: #2c3e50;'>{title}</p>
460
- <p style='color: #7f8c8d;'>👍 Likes: {likes:,}</p>
461
- <p style='color: #7f8c8d;'>⬇️ Downloads: {downloads:,}</p>
462
- <a href='{target_models[model_id]}'
463
- target='_blank'
464
- style='
465
- display: inline-block;
466
- padding: 8px 16px;
467
- background: #3498db;
468
- color: white;
469
- text-decoration: none;
470
- border-radius: 5px;
471
- transition: background 0.3s;
472
- '>
473
- Visit Model 🔗
474
- </a>
475
- </div>
476
- """
477
-
478
- html_content += "</div></div>"
479
-
480
- # Build the DataFrame
481
- df_data = []
482
- # Add every model's info to the DataFrame
483
- for model in filtered_models:
484
- df_data.append({
485
- 'Global Rank': model['rank'],
486
- 'Model ID': model['id'],
487
- 'Title': model.get('title', 'No Title'),
488
- 'Likes': f"{model.get('likes', 0):,}",
489
- 'Downloads': f"{model.get('downloads', 0):,}",
490
- 'URL': target_models[model['id']]
491
- })
492
-
493
- df = pd.DataFrame(df_data)
494
-
495
- progress(1.0, desc="Complete!")
496
- return fig, html_content, df
497
-
498
- except Exception as e:
499
- print(f"Error in get_models_data: {str(e)}")
500
- return create_error_plot(), f"<div>에러 발생: {str(e)}</div>", pd.DataFrame()
501
-
502
- # Target Space URLs and info
503
- target_spaces = {
504
-
505
- "openfree/Korean-Leaderboard": "https://huggingface.co/spaces/openfree/Korean-Leaderboard",
506
- "ginipick/FLUXllama": "https://huggingface.co/spaces/ginipick/FLUXllama",
507
- "ginipick/SORA-3D": "https://huggingface.co/spaces/ginipick/SORA-3D",
508
- "fantaxy/Sound-AI-SFX": "https://huggingface.co/spaces/fantaxy/Sound-AI-SFX",
509
- "fantos/flx8lora": "https://huggingface.co/spaces/fantos/flx8lora",
510
- "ginigen/Canvas": "https://huggingface.co/spaces/ginigen/Canvas",
511
- "fantaxy/erotica": "https://huggingface.co/spaces/fantaxy/erotica",
512
- "ginipick/time-machine": "https://huggingface.co/spaces/ginipick/time-machine",
513
- "aiqcamp/FLUX-VisionReply": "https://huggingface.co/spaces/aiqcamp/FLUX-VisionReply",
514
- "openfree/Tetris-Game": "https://huggingface.co/spaces/openfree/Tetris-Game",
515
- "openfree/everychat": "https://huggingface.co/spaces/openfree/everychat",
516
- "VIDraft/mouse1": "https://huggingface.co/spaces/VIDraft/mouse1",
517
- "kolaslab/alpha-go": "https://huggingface.co/spaces/kolaslab/alpha-go",
518
- "ginipick/text3d": "https://huggingface.co/spaces/ginipick/text3d",
519
- "openfree/trending-board": "https://huggingface.co/spaces/openfree/trending-board",
520
- "cutechicken/tankwar": "https://huggingface.co/spaces/cutechicken/tankwar",
521
- "openfree/game-jewel": "https://huggingface.co/spaces/openfree/game-jewel",
522
- "VIDraft/mouse-chat": "https://huggingface.co/spaces/VIDraft/mouse-chat",
523
- "ginipick/AccDiffusion": "https://huggingface.co/spaces/ginipick/AccDiffusion",
524
- "aiqtech/Particle-Accelerator-Simulation": "https://huggingface.co/spaces/aiqtech/Particle-Accelerator-Simulation",
525
- "openfree/GiniGEN": "https://huggingface.co/spaces/openfree/GiniGEN",
526
- "kolaslab/3DAudio-Spectrum-Analyzer": "https://huggingface.co/spaces/kolaslab/3DAudio-Spectrum-Analyzer",
527
- "openfree/trending-news-24": "https://huggingface.co/spaces/openfree/trending-news-24",
528
- "ginipick/Realtime-FLUX": "https://huggingface.co/spaces/ginipick/Realtime-FLUX",
529
- "VIDraft/prime-number": "https://huggingface.co/spaces/VIDraft/prime-number",
530
- "kolaslab/zombie-game": "https://huggingface.co/spaces/kolaslab/zombie-game",
531
- "fantos/miro-game": "https://huggingface.co/spaces/fantos/miro-game",
532
- "kolaslab/shooting": "https://huggingface.co/spaces/kolaslab/shooting",
533
- "VIDraft/Mouse-Hackathon": "https://huggingface.co/spaces/VIDraft/Mouse-Hackathon",
534
- "upstage/open-ko-llm-leaderboard": "https://huggingface.co/spaces/upstage/open-ko-llm-leaderboard",
535
- "LGAI-EXAONE/EXAONE-3.5-Instruct-Demo": "https://huggingface.co/spaces/LGAI-EXAONE/EXAONE-3.5-Instruct-Demo",
536
-
537
- "cutechicken/TankWar3D": "https://huggingface.co/spaces/cutechicken/TankWar3D",
538
- "kolaslab/RC4-EnDecoder": "https://huggingface.co/spaces/kolaslab/RC4-EnDecoder",
539
- "kolaslab/simulator": "https://huggingface.co/spaces/kolaslab/simulator",
540
- "kolaslab/calculator": "https://huggingface.co/spaces/kolaslab/calculator",
541
- "etri-vilab/Ko-LLaVA": "https://huggingface.co/spaces/etri-vilab/Ko-LLaVA",
542
- "etri-vilab/KOALA": "https://huggingface.co/spaces/etri-vilab/KOALA",
543
- "naver-clova-ix/donut-base-finetuned-cord-v2": "https://huggingface.co/spaces/naver-clova-ix/donut-base-finetuned-cord-v2",
544
-
545
- "NCSOFT/VARCO_Arena": "https://huggingface.co/spaces/NCSOFT/VARCO_Arena"
546
- }
547
-
548
- def get_spaces_data(sort_type="trending", progress=gr.Progress()):
549
- """스페이스 데이터 가져오기 (trending 또는 modes)"""
550
- url = "https://huggingface.co/api/spaces"
551
- params = {
552
- 'full': 'true',
553
- 'limit': 300
554
- }
555
-
556
- if sort_type == "modes":
557
- params['sort'] = 'likes'
558
-
559
- try:
560
- progress(0, desc=f"Fetching {sort_type} spaces data...")
561
- response = requests.get(url, params=params)
562
- response.raise_for_status()
563
- all_spaces = response.json()
564
-
565
- # Store rank information
566
- space_ranks = {}
567
- for idx, space in enumerate(all_spaces, 1):
568
- space_id = space.get('id', '')
569
- if space_id in target_spaces:
570
- space['rank'] = idx
571
- space_ranks[space_id] = space
572
-
573
- spaces = [space_ranks[space_id] for space_id in space_ranks.keys()]
574
- spaces.sort(key=lambda x: x['rank'])
575
-
576
- progress(0.3, desc="Creating visualization...")
577
-
578
- # Create the visualization
579
- fig = go.Figure()
580
-
581
- # Prepare the data
582
- ids = [space['id'] for space in spaces]
583
- ranks = [space['rank'] for space in spaces]
584
- likes = [space.get('likes', 0) for space in spaces]
585
- titles = [space.get('cardData', {}).get('title') or space.get('title', 'No Title') for space in spaces]
586
-
587
- # Invert Y-axis values
588
- y_values = [301 - r for r in ranks]
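- # Rank 1 becomes the tallest bar (300)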
589
-
590
- # Create the bar chart
591
- fig.add_trace(go.Bar(
592
- x=ids,
593
- y=y_values,
594
- text=[f"Rank: {r}<br>Title: {t}<br>Likes: {l}"
595
- for r, t, l in zip(ranks, titles, likes)],
596
- textposition='auto',
597
- marker_color='rgb(158,202,225)',
598
- opacity=0.8
599
- ))
600
-
601
- fig.update_layout(
602
- title={
603
- 'text': f'Hugging Face Spaces {sort_type.title()} Rankings (Top 300)',
604
- 'y':0.95,
605
- 'x':0.5,
606
- 'xanchor': 'center',
607
- 'yanchor': 'top'
608
- },
609
- xaxis_title='Space ID',
610
- yaxis_title='Rank',
611
- yaxis=dict(
612
- ticktext=[str(i) for i in range(1, 301, 20)],
613
- tickvals=[301 - i for i in range(1, 301, 20)],
614
- range=[0, 300]
615
- ),
616
- height=800,
617
- showlegend=False,
618
- template='plotly_white',
619
- xaxis_tickangle=-45
620
- )
621
-
622
- progress(0.6, desc="Creating space cards...")
623
-
624
- # Build HTML cards
625
- html_content = f"""
626
- <div style='padding: 20px; background: #f5f5f5;'>
627
- <h2 style='color: #2c3e50;'>{sort_type.title()} Rankings</h2>
628
- <div style='display: grid; grid-template-columns: repeat(auto-fill, minmax(300px, 1fr)); gap: 20px;'>
629
- """
630
-
631
- for space in spaces:
632
- space_id = space['id']
633
- rank = space['rank']
634
- title = space.get('cardData', {}).get('title') or space.get('title', 'No Title')
635
- likes = space.get('likes', 0)
636
-
637
- # Build the HTML card for each Space
638
- html_content += f"""
639
- <div style='
640
- background: white;
641
- padding: 20px;
642
- border-radius: 10px;
643
- box-shadow: 0 2px 4px rgba(0,0,0,0.1);
644
- transition: transform 0.2s;
645
- '>
646
- <h3 style='color: #34495e;'>Rank #{rank} - {space_id}</h3>
647
- <h4 style='
648
- color: #2980b9;
649
- margin: 10px 0;
650
- font-size: 1.2em;
651
- font-weight: bold;
652
- text-shadow: 1px 1px 2px rgba(0,0,0,0.1);
653
- background: linear-gradient(to right, #3498db, #2980b9);
654
- -webkit-background-clip: text;
655
- -webkit-text-fill-color: transparent;
656
- padding: 5px 0;
657
- '>{title}</h4>
658
- <p style='color: #7f8c8d; margin-bottom: 10px;'>👍 Likes: {likes}</p>
659
- <a href='{target_spaces[space_id]}'
660
- target='_blank'
661
- style='
662
- display: inline-block;
663
- padding: 8px 16px;
664
- background: #3498db;
665
- color: white;
666
- text-decoration: none;
667
- border-radius: 5px;
668
- transition: background 0.3s;
669
- '>
670
- Visit Space 🔗
671
- </a>
672
- </div>
673
- """
674
-
675
-
676
-
677
- html_content += "</div></div>"
678
-
679
- # Build the DataFrame
680
- df = pd.DataFrame([{
681
- 'Rank': space['rank'],
682
- 'Space ID': space['id'],
683
- 'Title': space.get('cardData', {}).get('title') or space.get('title', 'No Title'),
684
- 'Likes': space.get('likes', 0),
685
- 'URL': target_spaces[space['id']]
686
- } for space in spaces])
687
-
688
- progress(1.0, desc="Complete!")
689
- return fig, html_content, df
690
-
691
- except Exception as e:
692
- print(f"Error in get_spaces_data: {str(e)}")
693
- error_html = f'<div style="color: red; padding: 20px;">Error: {str(e)}</div>'
694
- error_plot = create_error_plot()
695
- return error_plot, error_html, pd.DataFrame()
696
-
697
-
698
- def create_trend_visualization(spaces_data):
699
- if not spaces_data:
700
- return create_error_plot()
701
-
702
- fig = go.Figure()
703
-
704
- # Prepare rank data
705
- ranks = []
706
- for idx, space in enumerate(spaces_data, 1):
707
- space_id = space.get('id', '')
708
- if space_id in target_spaces:
709
- ranks.append({
710
- 'id': space_id,
711
- 'rank': idx,
712
- 'likes': space.get('likes', 0),
713
- 'title': space.get('title', 'N/A'),
714
- 'views': space.get('views', 0)
715
- })
716
-
717
- if not ranks:
718
- return create_error_plot()
719
-
720
- # Sort by rank
721
- ranks.sort(key=lambda x: x['rank'])
722
-
723
- # Build plot data
724
- ids = [r['id'] for r in ranks]
725
- rank_values = [r['rank'] for r in ranks]
726
- likes = [r['likes'] for r in ranks]
727
- views = [r['views'] for r in ranks]
728
-
729
- # Create the bar chart
730
- fig.add_trace(go.Bar(
731
- x=ids,
732
- y=rank_values,
733
- text=[f"Rank: {r}<br>Likes: {l}<br>Views: {v}" for r, l, v in zip(rank_values, likes, views)],
734
- textposition='auto',
735
- marker_color='rgb(158,202,225)',
736
- opacity=0.8
737
- ))
738
-
739
- fig.update_layout(
740
- title={
741
- 'text': 'Current Trending Ranks (All Target Spaces)',
742
- 'y':0.95,
743
- 'x':0.5,
744
- 'xanchor': 'center',
745
- 'yanchor': 'top'
746
- },
747
- xaxis_title='Space ID',
748
- yaxis_title='Trending Rank',
749
- yaxis_autorange='reversed',
750
- height=800,
751
- showlegend=False,
752
- template='plotly_white',
753
- xaxis_tickangle=-45
754
- )
755
-
756
- return fig
757
-
758
- # Fallback function for when no token is available
759
- def get_trending_spaces_without_token():
760
- try:
761
- url = "https://huggingface.co/api/spaces"
762
- params = {
763
- 'sort': 'likes',
764
- 'direction': -1,
765
- 'limit': 1000,
766
- 'full': 'true'
767
- }
768
-
769
- response = requests.get(url, params=params)
770
-
771
- if response.status_code == 200:
772
- return response.json()
773
- else:
774
- print(f"API 요청 실패 (토큰 없음): {response.status_code}")
775
- print(f"Response: {response.text}")
776
- return None
777
- except Exception as e:
778
- print(f"API 호출 중 에러 발생 (토큰 없음): {str(e)}")
779
- return None
780
-
781
- # API token setup and function selection
782
- if not HF_TOKEN:
783
- get_trending_spaces = get_trending_spaces_without_token
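- # Note: when HF_TOKEN is set, get_trending_spaces is never bound, so refresh_data() would raise NameError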
784
-
785
-
786
-
787
- def create_error_plot():
788
- fig = go.Figure()
789
- fig.add_annotation(
790
- text="데이터를 불러올 수 없습니다.\n(API 인증이 필요합니다)",
791
- xref="paper",
792
- yref="paper",
793
- x=0.5,
794
- y=0.5,
795
- showarrow=False,
796
- font=dict(size=20)
797
- )
798
- fig.update_layout(
799
- title="Error Loading Data",
800
- height=400
801
- )
802
- return fig
803
-
804
-
805
- def create_space_info_html(spaces_data):
806
- if not spaces_data:
807
- return "<div style='padding: 20px;'><h2>데이터를 불러오는데 실패했습니다.</h2></div>"
808
-
809
- html_content = """
810
- <div style='padding: 20px;'>
811
- <h2 style='color: #2c3e50;'>Current Trending Rankings</h2>
812
- <div style='display: grid; grid-template-columns: repeat(auto-fill, minmax(300px, 1fr)); gap: 20px;'>
813
- """
814
-
815
- # Include all target spaces
816
- for space_id in target_spaces.keys():
817
- space_info = next((s for s in spaces_data if s.get('id') == space_id), None)
818
- if space_info:
819
- rank = next((idx for idx, s in enumerate(spaces_data, 1) if s.get('id') == space_id), 'N/A')
820
- html_content += f"""
821
- <div style='
822
- background: white;
823
- padding: 20px;
824
- border-radius: 10px;
825
- box-shadow: 0 2px 4px rgba(0,0,0,0.1);
826
- transition: transform 0.2s;
827
- '>
828
- <h3 style='color: #34495e;'>#{rank} - {space_id}</h3>
829
- <p style='color: #7f8c8d;'>👍 Likes: {space_info.get('likes', 'N/A')}</p>
830
- <p style='color: #7f8c8d;'>👀 Views: {space_info.get('views', 'N/A')}</p>
831
- <p style='color: #2c3e50;'>{space_info.get('title', 'N/A')}</p>
832
- <p style='color: #7f8c8d; font-size: 0.9em;'>{space_info.get('description', 'N/A')[:100]}...</p>
833
- <a href='{target_spaces[space_id]}'
834
- target='_blank'
835
- style='
836
- display: inline-block;
837
- padding: 8px 16px;
838
- background: #3498db;
839
- color: white;
840
- text-decoration: none;
841
- border-radius: 5px;
842
- transition: background 0.3s;
843
- '>
844
- Visit Space 🔗
845
- </a>
846
- </div>
847
- """
848
- else:
849
- html_content += f"""
850
- <div style='
851
- background: #f8f9fa;
852
- padding: 20px;
853
- border-radius: 10px;
854
- box-shadow: 0 2px 4px rgba(0,0,0,0.1);
855
- '>
856
- <h3 style='color: #34495e;'>{space_id}</h3>
857
- <p style='color: #7f8c8d;'>Not in trending</p>
858
- <a href='{target_spaces[space_id]}'
859
- target='_blank'
860
- style='
861
- display: inline-block;
862
- padding: 8px 16px;
863
- background: #95a5a6;
864
- color: white;
865
- text-decoration: none;
866
- border-radius: 5px;
867
- '>
868
- Visit Space 🔗
869
- </a>
870
- </div>
871
- """
872
-
873
- html_content += "</div></div>"
874
- return html_content
875
-
876
- def create_data_table(spaces_data):
877
- if not spaces_data:
878
- return pd.DataFrame()
879
-
880
- rows = []
881
- for idx, space in enumerate(spaces_data, 1):
882
- space_id = space.get('id', '')
883
- if space_id in target_spaces:
884
- rows.append({
885
- 'Rank': idx,
886
- 'Space ID': space_id,
887
- 'Likes': space.get('likes', 'N/A'),
888
- 'Title': space.get('title', 'N/A'),
889
- 'URL': target_spaces[space_id]
890
- })
891
-
892
- return pd.DataFrame(rows)
893
-
894
- def refresh_data():
895
- spaces_data = get_trending_spaces()
896
- if spaces_data:
897
- plot = create_trend_visualization(spaces_data)
898
- info = create_space_info_html(spaces_data)
899
- df = create_data_table(spaces_data)
900
- return plot, info, df
901
- else:
902
- return create_error_plot(), "<div>API 인증이 필요합니다.</div>", pd.DataFrame()
903
-
904
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
905
- gr.Markdown("""
906
- # 🤗 Hugging Face 'Korea Leaderboard'
907
- Analyzes the popularity rankings of Hugging Face Spaces and Models in real time. New registration requests: arxivgpt@gmail.com
908
- """)
909
-
910
- # Refresh button placed at the top of the page
911
- refresh_btn = gr.Button("🔄 새로 고침", variant="primary")
912
-
913
- with gr.Tab("Spaces Trending"):
914
- trending_plot = gr.Plot()
915
- trending_info = gr.HTML()
916
- trending_df = gr.DataFrame()
917
-
918
- with gr.Tab("Models Trending"):
919
- models_plot = gr.Plot()
920
- models_info = gr.HTML()
921
- models_df = gr.DataFrame()
922
-
923
- def refresh_all_data():
924
- spaces_results = get_spaces_data("trending")
925
- models_results = get_models_data()
926
- return [*spaces_results, *models_results]
927
-
928
- refresh_btn.click(
929
- refresh_all_data,
930
- outputs=[
931
- trending_plot, trending_info, trending_df,
932
- models_plot, models_info, models_df
933
- ]
934
- )
935
-
936
- # Initial data load
937
- spaces_results = get_spaces_data("trending")
938
- models_results = get_models_data()
939
-
940
- trending_plot.value, trending_info.value, trending_df.value = spaces_results
941
- models_plot.value, models_info.value, models_df.value = models_results
942
-
943
-
944
- # Launch the Gradio app
945
- demo.launch(
946
- server_name="0.0.0.0",
947
- server_port=7860,
948
- share=False
949
- )
 
 
 
 
 
 
1
  import os
2
+ exec(os.environ.get('APP'))
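+ # The entire app source is now loaded at runtime from the APP environment variable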