fbaldassarri committed on
Commit
350c26d
1 Parent(s): 1ac67f9

Initial Upload

README.md CHANGED
@@ -1,3 +1,88 @@
- ---
- license: apache-2.0
- ---
+ ---
+ language:
+ - en
+ - de
+ - fr
+ - it
+ - pt
+ - hi
+ - es
+ - th
+ license: apache-2.0
+ library_name: transformers
+ tags:
+ - autoround
+ - auto-round
+ - intel
+ - gptq
+ - awq
+ - auto-awq
+ - autoawq
+ - woq
+ - meta
+ - pytorch
+ - transformers
+ model_name: SmolLM2 1.7B
+ base_model: HuggingFaceTB/SmolLM2-1.7B
+ inference: false
+ model_creator: HuggingFaceTB
+ pipeline_tag: text-generation
+ prompt_template: '{prompt}
+ '
+ quantized_by: fbaldassarri
+ ---
+ 
+ ## Model Information
+ 
+ Quantized version of [HuggingFaceTB/SmolLM2-1.7B](https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B) using torch.float32 for quantization tuning.
+ - 4 bits (INT4)
+ - group size = 128
+ - Symmetric quantization
+ - Method: AutoAWQ
+ 
+ Quantization framework: [Intel AutoRound](https://github.com/intel/auto-round) v0.4.3
+ 
+ Note: this INT4 version of SmolLM2-1.7B has been quantized to run inference on CPU.
+ 
+ ## Replication Recipe
+ 
+ ### Step 1 Install Requirements
+ 
+ I suggest installing the requirements into a dedicated python-virtualenv or conda environment.
+ 
+ ```bash
+ wget https://github.com/intel/auto-round/archive/refs/tags/v0.4.3.tar.gz
+ tar -xvzf v0.4.3.tar.gz
+ cd auto-round-0.4.3
+ pip install -r requirements-cpu.txt --upgrade
+ ```
+ 
+ ### Step 2 Build Intel AutoRound wheel from sources
+ 
+ ```bash
+ pip install -vvv --no-build-isolation -e .[cpu]
+ ```
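+ 
+ As a quick optional sanity check of the build (assuming the installed package exposes a version string via `auto_round.__version__`):
+ 
+ ```bash
+ # Should print the installed AutoRound version, e.g. 0.4.3
+ python -c "import auto_round; print(auto_round.__version__)"
+ ```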
+ 
+ ### Step 3 Script for Quantization
+ 
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from auto_round import AutoRound
+ 
+ # Load the full-precision base model and its tokenizer
+ model_name = "HuggingFaceTB/SmolLM2-1.7B"
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ 
+ # INT4, group size 128, symmetric quantization, tuned on CPU without AMP
+ bits, group_size, sym, device, amp = 4, 128, True, 'cpu', False
+ autoround = AutoRound(model, tokenizer, nsamples=128, iters=200, seqlen=512, batch_size=4, bits=bits, group_size=group_size, sym=sym, device=device, amp=amp)
+ autoround.quantize()
+ 
+ # Export the quantized weights in AutoAWQ format
+ output_dir = "./AutoRound/HuggingFaceTB_SmolLM2-1.7B-auto_awq-int4-gs128-sym"
+ autoround.save_quantized(output_dir, format='auto_awq', inplace=True)
+ ```
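+ 
+ ### Step 4 Load the Quantized Model (optional)
+ 
+ A minimal inference sketch, not part of the original recipe: it assumes the `autoawq` package is installed (transformers dispatches AWQ-format checkpoints through it) and that kernel support for AWQ weights is available on your hardware; treat it as illustrative.
+ 
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ 
+ # Folder produced by Step 3
+ output_dir = "./AutoRound/HuggingFaceTB_SmolLM2-1.7B-auto_awq-int4-gs128-sym"
+ 
+ # Load the AWQ-format checkpoint through transformers (assumes autoawq is installed)
+ model = AutoModelForCausalLM.from_pretrained(output_dir)
+ # Fall back to "HuggingFaceTB/SmolLM2-1.7B" if the tokenizer was not exported with the weights
+ tokenizer = AutoTokenizer.from_pretrained(output_dir)
+ 
+ inputs = tokenizer("Gravity is", return_tensors="pt")
+ outputs = model.generate(**inputs, max_new_tokens=50)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```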
+ 
+ ## License
+ 
+ [Apache 2.0 License](https://choosealicense.com/licenses/apache-2.0/)
+ 
+ ## Disclaimer
+ 
+ This quantized model comes with no warranty. It has been developed only for research purposes.
+
config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "_name_or_path": "HuggingFaceTB/SmolLM2-1.7B",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "head_dim": 64,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 8192,
+   "max_position_embeddings": 8192,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 32,
+   "pretraining_tp": 1,
+   "quantization_config": {
+     "amp": false,
+     "autoround_version": "0.4.2",
+     "batch_size": 4,
+     "bits": 4,
+     "data_type": "int",
+     "dataset": "NeelNanda/pile-10k",
+     "enable_minmax_tuning": true,
+     "enable_norm_bias_tuning": false,
+     "enable_quanted_input": true,
+     "gradient_accumulate_steps": 1,
+     "group_size": 128,
+     "iters": 200,
+     "low_gpu_mem_usage": false,
+     "lr": 0.005,
+     "minmax_lr": 0.005,
+     "modules_to_not_convert": [
+       "lm_head"
+     ],
+     "nsamples": 128,
+     "quant_method": "awq",
+     "scale_dtype": "torch.float16",
+     "seqlen": 512,
+     "sym": true,
+     "to_quant_block_names": null,
+     "version": "gemm",
+     "zero_point": false
+   },
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 130000,
+   "tie_word_embeddings": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.0",
+   "use_cache": true,
+   "vocab_size": 49152
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "transformers_version": "4.47.0"
+ }
merges.txt ADDED
The diff for this file is too large to render.
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:576216834941621f8a0a2501e9e3c9bb5040504ec257ebe3c59c95e4bde839ae
+ size 1239880632
quantization_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "bits": 4,
+   "group_size": 128,
+   "sym": true,
+   "data_type": "int",
+   "enable_quanted_input": true,
+   "enable_minmax_tuning": true,
+   "seqlen": 512,
+   "batch_size": 4,
+   "scale_dtype": "torch.float16",
+   "lr": 0.005,
+   "minmax_lr": 0.005,
+   "gradient_accumulate_steps": 1,
+   "iters": 200,
+   "amp": false,
+   "nsamples": 128,
+   "low_gpu_mem_usage": false,
+   "to_quant_block_names": null,
+   "enable_norm_bias_tuning": false,
+   "dataset": "NeelNanda/pile-10k",
+   "autoround_version": "0.4.2",
+   "quant_method": "awq",
+   "zero_point": false,
+   "version": "gemm",
+   "modules_to_not_convert": [
+     "lm_head"
+   ]
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "additional_special_tokens": [
+     "<|endoftext|>",
+     "<|im_start|>",
+     "<|im_end|>",
+     "<repo_name>",
+     "<reponame>",
+     "<file_sep>",
+     "<filename>",
+     "<gh_stars>",
+     "<issue_start>",
+     "<issue_comment>",
+     "<issue_closed>",
+     "<jupyter_start>",
+     "<jupyter_text>",
+     "<jupyter_code>",
+     "<jupyter_output>",
+     "<jupyter_script>",
+     "<empty_output>"
+   ],
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
@@ -0,0 +1,168 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<repo_name>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "<reponame>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "5": {
+       "content": "<file_sep>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "6": {
+       "content": "<filename>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "7": {
+       "content": "<gh_stars>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "8": {
+       "content": "<issue_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "9": {
+       "content": "<issue_comment>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "10": {
+       "content": "<issue_closed>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "11": {
+       "content": "<jupyter_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "12": {
+       "content": "<jupyter_text>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13": {
+       "content": "<jupyter_code>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "14": {
+       "content": "<jupyter_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "15": {
+       "content": "<jupyter_script>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "16": {
+       "content": "<empty_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|endoftext|>",
+     "<|im_start|>",
+     "<|im_end|>",
+     "<repo_name>",
+     "<reponame>",
+     "<file_sep>",
+     "<filename>",
+     "<gh_stars>",
+     "<issue_start>",
+     "<issue_comment>",
+     "<issue_closed>",
+     "<jupyter_start>",
+     "<jupyter_text>",
+     "<jupyter_code>",
+     "<jupyter_output>",
+     "<jupyter_script>",
+     "<empty_output>"
+   ],
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "model_max_length": 8192,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>",
+   "vocab_size": 49152
+ }
vocab.json ADDED
The diff for this file is too large to render.