
This MLX model was exported from its GPTQ-quantized counterpart using [GPTQModel](https://github.com/ModelCloud/GPTQModel).

## How to run this model

```shell
# install mlx-lm
pip install mlx_lm
```

```python
from mlx_lm import load, generate

# load the MLX export from the Hugging Face Hub
mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)

prompt = "The capital of France is"

# wrap the prompt in the model's chat template
messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
```
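
For quick tests without writing Python, mlx-lm also ships a command-line generator. A minimal sketch, assuming a recent mlx-lm release that installs the `mlx_lm.generate` entry point; the prompt and token budget are just examples:

```shell
mlx_lm.generate \
  --model ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3 \
  --prompt "The capital of France is" \
  --max-tokens 128
```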

## Export GPTQ to MLX

```shell
# install gptqmodel with the mlx extra
pip install gptqmodel[mlx] --no-build-isolation
```

```python
from gptqmodel import GPTQModel

# GPTQ-quantized source model on the Hugging Face Hub
gptq_model_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-v3"
# local directory for the converted MLX weights
mlx_path = "./vortex/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"

# convert the GPTQ checkpoint to MLX format
GPTQModel.export(gptq_model_path, mlx_path, "mlx")
```
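
Once the export finishes, the converted weights load directly from the local directory, which is a quick way to sanity-check the conversion. A minimal sketch reusing the `mlx_path` directory from above:

```python
from mlx_lm import load, generate

# load the freshly exported MLX model from the local export directory
model, tokenizer = load("./vortex/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3")

# generate a short completion to confirm the export works
text = generate(model, tokenizer, prompt="The capital of France is", verbose=True)
```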