fbaldassarri
committed on
Commit
•
c4dd12f
1
Parent(s):
8e40cae
Upload README.md
Browse files
README.md
CHANGED
@@ -68,8 +68,8 @@ pip install -vvv --no-build-isolation -e .[cpu]
|
|
68 |
model = AutoModelForCausalLM.from_pretrained(model_name)
|
69 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
70 |
from auto_round import AutoRound
|
71 |
-
bits, group_size, sym = 4, 128, False
|
72 |
-
autoround = AutoRound(model, tokenizer, nsamples=128, iters=200, seqlen=512, batch_size=4, bits=bits, group_size=group_size, sym=sym)
|
73 |
autoround.quantize()
|
74 |
output_dir = "./AutoRound/HuggingFaceTB_SmolLM2-1.7B-auto_gptq-int4-gs128-asym"
|
75 |
autoround.save_quantized(output_dir, format='auto_gptq', inplace=True)
|
|
|
68 |
model = AutoModelForCausalLM.from_pretrained(model_name)
|
69 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
70 |
from auto_round import AutoRound
|
71 |
+
bits, group_size, sym, device, amp = 4, 128, False, 'cpu', False
|
72 |
+
autoround = AutoRound(model, tokenizer, nsamples=128, iters=200, seqlen=512, batch_size=4, bits=bits, group_size=group_size, sym=sym, device=device, amp=amp)
|
73 |
autoround.quantize()
|
74 |
output_dir = "./AutoRound/HuggingFaceTB_SmolLM2-1.7B-auto_gptq-int4-gs128-asym"
|
75 |
autoround.save_quantized(output_dir, format='auto_gptq', inplace=True)
|