import os
import shutil
import subprocess
import signal
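# Opt out of Gradio usage analytics before gradio is imported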
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
import gradio as gr

from huggingface_hub import create_repo, HfApi
from huggingface_hub import snapshot_download
from huggingface_hub import whoami
from huggingface_hub import ModelCard
from huggingface_hub import login
from huggingface_hub import scan_cache_dir

from gradio_huggingfacehub_search import HuggingfaceHubSearch

from apscheduler.schedulers.background import BackgroundScheduler

from textwrap import dedent

from mlx_lm import convert

HF_TOKEN = os.environ.get("HF_TOKEN")

def clear_cache():
    """Delete every cached model revision so the Space does not run out of disk space."""
    scan = scan_cache_dir()
    to_delete = []
    for repo in scan.repos:
        if repo.repo_type == "model":
            to_delete.extend(rev.commit_hash for rev in repo.revisions)

    # delete_revisions() only builds a deletion strategy; execute() actually removes the files
    scan.delete_revisions(*to_delete).execute()

    print("Cache has been cleared")

def process_model(model_id, q_method, oauth_token: gr.OAuthToken | None):

    if oauth_token is None or oauth_token.token is None:
        raise ValueError("You must be logged in to use MLX-my-repo")

    model_name = model_id.split('/')[-1]
    username = whoami(oauth_token.token)["name"]

    login(token=oauth_token.token, add_to_git_credential=True)
    
    try:
        upload_repo = f"{username}/{model_name}-mlx"
        new_repo_url = f"https://huggingface.co/{upload_repo}"
        # Translate the UI choice ("Q4"/"Q8") into the bit width passed to mlx_lm's convert()
        q_bits = 4 if q_method == "Q4" else 8
        convert(model_id, quantize=True, q_bits=q_bits, upload_repo=upload_repo)
        clear_cache()
        return (
            f'Find your repo <a href=\'{new_repo_url}\' target="_blank" style="text-decoration:underline">here</a>',
            "llama.png",
        )
    except Exception as e:
        return (f"Error: {e}", "error.png")
    finally:
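        # convert() writes the converted weights to a local "mlx_model" folder before uploading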
        shutil.rmtree("mlx_model", ignore_errors=True)
        clear_cache()
        print("Folder cleaned up successfully!")

css="""/* Custom CSS to allow scrolling */
.gradio-container {overflow-y: auto;}
"""
# Create Gradio interface
with gr.Blocks(css=css) as demo: 
    gr.Markdown("You must be logged in to use MLX-my-repo.")
    gr.LoginButton(min_width=250)
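    # The LoginButton provides the OAuth token that Gradio passes to process_model's oauth_token argument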

    model_id = HuggingfaceHubSearch(
        label="Hub Model ID",
        placeholder="Search for model id on Hugging Face",
        search_type="model",
    )

    q_method = gr.Dropdown(
        ["Q4", "Q8"],
        label="Quantization Method",
        info="MLX quantization type",
        value="Q4",
        filterable=False,
        visible=True
    )
    
    iface = gr.Interface(
        fn=process_model,
        inputs=[
            model_id,
            q_method,
        ],
        outputs=[
            gr.Markdown(label="output"),
            gr.Image(show_label=False),
        ],
        title="Create your own MLX Quants, blazingly fast ⚡!",
        description="The Space takes an HF model repo as input, quantizes it, and creates a Public/Private repo containing the selected quant under your HF user namespace.",
        api_name=False
    )

def restart_space():
    HfApi().restart_space(repo_id="reach-vb/mlx-my-repo", token=HF_TOKEN, factory_reboot=True)

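# Schedule a factory reboot of the Space every 21600 seconds (6 hours) to reclaim disk space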
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=21600)
scheduler.start()

# Launch the interface
demo.queue(default_concurrency_limit=1, max_size=5).launch(debug=True, show_api=False)