Upload folder using huggingface_hub (#3)
config.json CHANGED
```diff
@@ -25,7 +25,7 @@
   "quantization_config": {
     "bits": 4,
     "checkpoint_format": "gptq",
-    "damp_percent": 0.01,
+    "damp_percent": 0.005,
     "desc_act": true,
     "group_size": 128,
     "lm_head": false,
```
model.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:43d3b3a29041429f3aff9d8a9c3d3a8808181aecfb0502944b5b880debbf7b4e
+oid sha256:c1ca744b94b70291092478822d996cae44c88f04ea7543fb3fda9e9cd034783c
 size 5732943672
```
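`model.safetensors` is stored via Git LFS, so the diff only touches the pointer file: `oid` is the SHA-256 of the actual payload and `size` its byte count (unchanged at 5,732,943,672 bytes, since only the weights' values changed). A quick way to confirm a downloaded file matches the new pointer, with the expected hash copied from the diff above:

```python
import hashlib

EXPECTED = "c1ca744b94b70291092478822d996cae44c88f04ea7543fb3fda9e9cd034783c"

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks and return its hex SHA-256."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

assert sha256_of("model.safetensors") == EXPECTED, "weights do not match the LFS pointer"
```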
quantize_config.json CHANGED
```diff
@@ -5,10 +5,10 @@
   "static_groups": false,
   "sym": true,
   "lm_head": false,
-  "damp_percent": 0.01,
+  "damp_percent": 0.005,
   "true_sequential": true,
   "model_name_or_path": "",
-  "model_file_base_name": "",
+  "model_file_base_name": "model",
   "quant_method": "gptq",
   "checkpoint_format": "gptq",
   "meta": {
```
special_tokens_map.json CHANGED
```diff
@@ -13,5 +13,5 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "<|eot_id|>"
+  "pad_token": "<|finetune_right_pad_id|>"
 }
```
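The pad token moves from `<|eot_id|>`, which Llama 3-family tokenizers also use as the end-of-turn marker, to the dedicated `<|finetune_right_pad_id|>` token, so padding no longer aliases a semantically meaningful token. A quick post-load check (the repo id below is a placeholder for this repository):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo")  # placeholder id
print(tok.pad_token)                   # expected: <|finetune_right_pad_id|>
assert tok.pad_token != "<|eot_id|>"   # padding no longer collides with end-of-turn
```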
tokenizer_config.json CHANGED
```diff
@@ -2058,6 +2058,6 @@
     "attention_mask"
   ],
   "model_max_length": 131072,
-  "pad_token": "<|eot_id|>",
+  "pad_token": "<|finetune_right_pad_id|>",
   "tokenizer_class": "PreTrainedTokenizerFast"
 }
```
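tokenizer_config.json carries the same pad-token change, so batched encoding picks up the new token with no extra configuration. A small usage sketch under the same placeholder repo id:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo")  # placeholder id
batch = tok(["short", "a somewhat longer input"], padding=True, return_tensors="pt")
# Positions where attention_mask == 0 are filled with the id of
# <|finetune_right_pad_id|> rather than <|eot_id|>.
print(batch["input_ids"], batch["attention_mask"])
```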