danielhanchen committed
Commit d4a2af1 · verified · 1 Parent(s): addf43a

Add files using upload-large-folder tool

Files changed (2):
  1. config.json +21 -1
  2. generation_config.json +1 -1
config.json CHANGED
@@ -20,6 +20,26 @@
   "num_key_value_heads": 8,
   "pad_token_id": 128004,
   "pretraining_tp": 1,
+  "quantization_config": {
+    "_load_in_4bit": true,
+    "_load_in_8bit": false,
+    "bnb_4bit_compute_dtype": "bfloat16",
+    "bnb_4bit_quant_storage": "uint8",
+    "bnb_4bit_quant_type": "nf4",
+    "bnb_4bit_use_double_quant": true,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": [
+      "lm_head",
+      "multi_modal_projector",
+      "merger",
+      "modality_projection"
+    ],
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  },
   "rms_norm_eps": 1e-05,
   "rope_scaling": {
     "factor": 8.0,
@@ -31,7 +51,7 @@
   "rope_theta": 500000.0,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.48.1",
+  "transformers_version": "4.49.0.dev0",
   "unsloth_fixed": true,
   "use_cache": true,
   "vocab_size": 128256
generation_config.json CHANGED
@@ -7,5 +7,5 @@
   "pad_token_id": 128004,
   "temperature": 0.6,
   "top_p": 0.95,
-  "transformers_version": "4.48.1"
+  "transformers_version": "4.49.0.dev0"
 }
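
The sampling defaults in this file travel with the model and are read by generate() automatically. A small sketch of the equivalent explicit GenerationConfig follows; do_sample=True is an assumption for illustration, since temperature and top_p only take effect when sampling is enabled and that field sits outside the visible hunk.

from transformers import GenerationConfig

# Rebuilds the defaults shipped in generation_config.json.
gen_config = GenerationConfig(
    pad_token_id=128004,
    temperature=0.6,
    top_p=0.95,
    do_sample=True,  # assumption: not shown in the hunk above
)

# Hypothetical usage with an already-loaded model and tokenized inputs:
# outputs = model.generate(**inputs, generation_config=gen_config)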