dsikka committed on
Commit 40a1332
Parent: 8ab0cb2

Upload folder using huggingface_hub

Files changed (3):
  1. config.json +2 -2
  2. model.safetensors +1 -1
  3. recipe.yaml +4 -7
config.json CHANGED
@@ -28,7 +28,7 @@
         "Linear"
       ],
       "weights": {
-        "actorder": "weight",
+        "actorder": null,
         "block_structure": null,
         "dynamic": false,
         "group_size": 128,
@@ -51,7 +51,7 @@
   "quantization_status": "compressed",
   "sparsity_config": {
     "format": "dense",
-    "global_sparsity": 0.14292254340795876,
+    "global_sparsity": 0.14375328644374685,
     "ignore": [],
     "registry_requires_subclass": false,
     "sparsity_structure": "unstructured",
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:be61f74ba747c57efc6f2f920e60ef74e690fc46f0525ddfab231e2a58661e0a
+oid sha256:892873b79abcf66508b526a74179177e303de9b4b1464a6177096348faa7fd76
 size 761968800
recipe.yaml CHANGED
@@ -1,9 +1,6 @@
-quant_stage:
-  quant_modifiers:
+DEFAULT_stage:
+  DEFAULT_modifiers:
     GPTQModifier:
+      targets: Linear
       ignore: [lm_head]
-      config_groups:
-        group_0:
-          weights: {num_bits: 4, type: int, symmetric: true, strategy: group, group_size: 128,
-            actorder: weight}
-          targets: [Linear]
+      scheme: W4A16
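
The new recipe replaces the explicit config_groups block with the W4A16 scheme preset (the old group_0 weights dict shows what that preset expands to: 4-bit symmetric int weights, group strategy, group_size 128, 16-bit activations). A minimal sketch of producing such a recipe with llm-compressor's oneshot entry point, assuming the llmcompressor API (the import path for oneshot varies across versions); the model id, calibration dataset, and output directory are placeholders:

    from llmcompressor.modifiers.quantization import GPTQModifier
    from llmcompressor.transformers import oneshot  # newer versions: from llmcompressor import oneshot

    # W4A16: 4-bit symmetric grouped weight quantization, activations left in 16-bit.
    recipe = GPTQModifier(targets="Linear", scheme="W4A16", ignore=["lm_head"])

    oneshot(
        model="org/base-model",           # placeholder model id
        dataset="open_platypus",          # placeholder calibration dataset
        recipe=recipe,
        output_dir="base-model-W4A16",    # placeholder output directory
        max_seq_length=2048,
        num_calibration_samples=512,
    )
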