|
from huggingface_hub import HfApi, HfFileSystem |
|
import re |
|
from tqdm import tqdm |
|
import concurrent.futures |
|
import gradio as gr |
|
import datetime |
|
import pandas as pd |
|
import os |
|
import threading |
|
import time |
|
|
|
# Hub token used to authenticate the periodic Space restart; read from the
# environment so it never lives in source.
HF_TOKEN = os.getenv('HF_TOKEN')

# Shared Hub clients: `api` for model listing / restart calls, `fs` for
# reading README files straight off repos.
api = HfApi()

fs = HfFileSystem()
|
|
|
def restart_space(delay_seconds=36000):
    """Sleep for *delay_seconds* (10 hours by default), then restart this Space.

    Meant to run in a background thread so the leaderboard data refreshes
    itself periodically (the whole pipeline runs at import time).

    Parameters
    ----------
    delay_seconds : int, optional
        Seconds to wait before triggering the restart. Defaults to 36000
        (the original hard-coded value), so existing callers are unaffected.
    """
    time.sleep(delay_seconds)
    api.restart_space(repo_id="Tanvir1337/mradermacher-quantized-models", token=HF_TOKEN)
|
|
|
text = f""" |
|
π― The Leaderboard aims to track mradermacher's gguf quantized models. |
|
|
|
## π οΈ Backend |
|
|
|
The leaderboard's backend mainly runs on the [Hugging Face Hub API](https://huggingface.co/docs/huggingface_hub/v0.5.1/en/package_reference/hf_api). |
|
|
|
## π Searching |
|
|
|
You can search for author or a spesific model using the search bar. |
|
|
|
## β Last Update |
|
|
|
This space is last updated in **{str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))}**. |
|
|
|
## π Important Note |
|
|
|
This space potentially includes incorrectly quantized models for a model. |
|
|
|
If you find any incorrectly quantized model, please report it to me. |
|
""" |
|
|
|
# All GGUF-quantized repos published by mradermacher. `ModelInfo.id` is the
# public attribute; no need to reach into __dict__.
quant_models = [model.id for model in api.list_models(author="mradermacher") if "GGUF" in model.id]

# Captures "(https://huggingface.co/<author>/<model>)" markdown links in a
# README: group 1 = source author, group 2 = source model name.
pattern = r'\(https://huggingface\.co/([^/]+)/([^/]+)\)'

# Shared map: source "author/model" -> list of quantized repo ids.
# Populated concurrently by process_model below.
liste = {}
|
|
|
def process_model(i, pattern, liste):
    """Map one quantized repo back to its source model via its README link.

    Parameters
    ----------
    i : str
        Repo id of a quantized model (e.g. "mradermacher/Foo-GGUF").
    pattern : str
        Regex whose two groups capture the source author and model name from
        the first huggingface.co link found in the README.
    liste : dict
        Shared mapping of "author/model" -> list of quantized repo ids;
        mutated in place.
    """
    # Renamed from `text` to avoid shadowing the module-level `text` banner.
    readme = fs.read_text(i + "/README.md")
    match = re.search(pattern, readme)
    if not match:
        return

    # The model-name group ([^/]+) can greedily swallow a trailing ")" from
    # the markdown link, so cut at the first ")" to recover the bare repo id.
    full_id = (match.group(1) + "/" + match.group(2)).split(")")[0]

    # setdefault replaces the try/KeyError dance and is a single dict
    # operation, which keeps the shared dict safe under CPython's GIL when
    # this runs across many threads.
    liste.setdefault(full_id, []).append(i)
|
|
|
|
|
# The README fetches are network-bound, so a wide thread pool is cheap and
# pays off; exiting the `with` also joins any stragglers.
num_threads = 64

with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
    futures = [executor.submit(process_model, repo_id, pattern, liste) for repo_id in quant_models]
    concurrent.futures.wait(futures)
|
|
|
|
|
# Flatten the source-model -> quantized-repos mapping into parallel columns
# for the leaderboard table.
authors, models, gguf = [], [], []

for model, quant_ids in liste.items():
    models.append(model)
    authors.append(model.split('/')[0])
    # Keep the LAST repo id carrying the "-GGUF" marker (matching the
    # original overwrite-in-a-loop behavior), or None when absent.
    gguf.append(next((q for q in reversed(quant_ids) if "-GGUF" in q), None))

df = pd.DataFrame({'π€ Author Name': authors, 'π€ Model Name': models, 'π₯ GGUF': gguf})
|
|
|
|
|
def search(search_text):
    """Filter the leaderboard by exact author or "author/model" name.

    An empty query returns the full table. The table cells hold rendered
    HTML links, so the query is passed through `clickable` before comparing.
    """
    if not search_text:
        return df

    # A slash means the query names a full model id; otherwise treat it as
    # an author name.
    column = 'π€ Model Name' if '/' in search_text else 'π€ Author Name'
    return df[df[column] == clickable(search_text)]
|
|
|
|
|
def clickable(x):
    """Wrap *x* in an HTML anchor linking to its Hugging Face page.

    Falsy input (None, empty string) yields None so empty cells stay empty.
    """
    if not x:
        return None
    link_style = "color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;"
    return f'<a target="_blank" href="https://huggingface.co/{x}" style="{link_style}">{x}</a>'
|
|
|
|
|
def to_clickable(df):
    """Render every cell of *df* as a clickable Hugging Face link.

    Mutates *df* IN PLACE and returns it. The in-place mutation is
    load-bearing: `search` compares the global frame's cells against
    `clickable(query)`, which only matches once the cells are rendered.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame whose string cells are repo/author ids (None cells stay None,
        since `clickable` maps falsy input to None).
    """
    # `df.columns` iterates directly; no list() copy or lambda wrapper needed.
    for column in df.columns:
        df[column] = df[column].apply(clickable)
    return df
|
|
|
|
|
# Assemble the Gradio UI: avatar header, title, description, search bar and
# the leaderboard table, then wire search and launch.
with gr.Blocks() as demo:
    gr.Markdown("""<center><img src = "https://huggingface.co/avatars/6b97d30ff0bdb5d5c633ba850af739cd.svg" width=200 height=200></center>""")
    gr.Markdown("""<h1 align="center" id="space-title">mradermacher Quantized Models</h1>""")
    gr.Markdown(text)

    with gr.Column(min_width=320):
        search_bar = gr.Textbox(placeholder="π Search for a author or a specific model", show_label=False)

    # NOTE(review): to_clickable mutates the global df in place; `search`
    # relies on that so its comparisons match the rendered cell HTML.
    df_clickable = to_clickable(df)
    # markdown datatype lets the HTML anchors render inside the table cells.
    gr_df = gr.Dataframe(df_clickable, interactive=False, datatype=["markdown"]*len(df.columns))

    # Re-filter the table on every Enter press in the search box.
    search_bar.submit(fn=search, inputs=search_bar, outputs=gr_df)

# Background thread sleeps ~10 h then restarts the Space, refreshing all the
# data computed at import time; then serve the app (blocks here).
threading.Thread(target=restart_space).start()
demo.launch()
|
|