---
quantized_by: nisten
pipeline_tag: text-generation
language:
- en
license_link: https://huggingface.co/huihui-ai/Qwen2.5-Coder-7B-Instruct-abliterated/blob/main/LICENSE
tags:
- chat
- abliterated
- uncensored
- AWQ
- 4bit
base_model: huihui-ai/Qwen2.5-Coder-7B-Instruct-abliterated
license: apache-2.0
---

## Use this as a draft model, quant code provided, love you all.

A 4-bit AWQ quant of https://huggingface.co/huihui-ai/Qwen2.5-Coder-7B-Instruct-abliterated. It is small and fast enough to serve as the draft model in a speculative decoding setup, sketched below.
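
As a hedged, untested sketch (not part of the original quant script): vLLM can pair this quant as the draft model against a larger target. The target model id, the path to this quant, and `num_speculative_tokens` below are illustrative assumptions, and the speculative decoding kwargs have changed across vLLM releases, so check them against your installed version.

```python
# Speculative decoding sketch -- all names and values are assumptions, tune for your setup
from vllm import LLM, SamplingParams

llm = LLM(
    model="Qwen/Qwen2.5-Coder-32B-Instruct",     # assumed larger target model
    speculative_model="path/to/this-awq-quant",  # local dir or Hub id of this 4-bit quant
    num_speculative_tokens=5,                    # draft tokens proposed per verification step
)

out = llm.generate(["Write a quicksort in Python."], SamplingParams(max_tokens=256))
print(out[0].outputs[0].text)
```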

Code used to quantize it:
```python
from datasets import load_dataset
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

model_path = 'huihui-ai/Qwen2.5-Coder-7B-Instruct-abliterated'
quant_path = 'q7awqlocaldirname'
# 4-bit weights, 128-element groups, zero-point quantization, GEMM kernels
quant_config = { "zero_point": True, "q_group_size": 128, "w_bit": 4, "version": "GEMM" }

# Load the unquantized model and its tokenizer
model = AutoAWQForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

# Calibration data: flatten each chosen conversation into "role: content" lines
def load_openhermes_coding():
    data = load_dataset("alvarobartt/openhermes-preferences-coding", split="train")
    samples = []
    for sample in data:
        responses = [f'{response["role"]}: {response["content"]}' for response in sample["chosen"]]
        samples.append("\n".join(responses))

    return samples

# Quantize
model.quantize(
    tokenizer,
    quant_config=quant_config,
    calib_data=load_openhermes_coding(),
    # MODIFY these parameters if need be:
    # n_parallel_calib_samples=32,
    # max_calib_samples=128,
    # max_calib_seq_len=4096
)

# Save quantized model
model.save_quantized(quant_path)
tokenizer.save_pretrained(quant_path)

print(f'Model is quantized and saved at "{quant_path}"')
```
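
And a quick load-back check, not part of the original script: `from_quantized` and `fuse_layers` are standard AutoAWQ calls, but the prompt, token budget, and the assumption of a CUDA device are illustrative.

```python
# Sanity-check the saved quant (illustrative settings)
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

quant_path = 'q7awqlocaldirname'  # same directory the quant script saved to

model = AutoAWQForCausalLM.from_quantized(quant_path, fuse_layers=True)
tokenizer = AutoTokenizer.from_pretrained(quant_path, trust_remote_code=True)

prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Write a binary search in Python."}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")  # assumes a CUDA device
output = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```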