ZeroUniqueness committed on 2023-08-04 18:53:55
Commit bc0b0da • Parent(s): e3b6d08
Autosave for checkpoint additions

Files changed:
- README.md (+12 -0)
- adapter_config.json (+4 -4)
- adapter_model.bin (+1 -1)
README.md
CHANGED
@@ -37,6 +37,17 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_use_double_quant: True
 - bnb_4bit_compute_dtype: bfloat16
 
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - load_in_8bit: False
 - load_in_4bit: True
@@ -52,5 +63,6 @@ The following `bitsandbytes` quantization config was used during training:
 - PEFT 0.5.0.dev0
 - PEFT 0.5.0.dev0
 - PEFT 0.5.0.dev0
+- PEFT 0.5.0.dev0
 
 - PEFT 0.5.0.dev0
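For reference, the quantization settings added to the README map one-to-one onto `transformers`' `BitsAndBytesConfig`. A minimal sketch of the same settings in code, assuming a recent `transformers` with `bitsandbytes` installed; this is not the repo's original training script, just the listed values expressed in code:

```python
# Sketch: the README's quantization settings as a BitsAndBytesConfig.
# Assumes transformers + bitsandbytes are installed; values are taken
# verbatim from the README diff above.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```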
adapter_config.json
CHANGED
@@ -3,7 +3,7 @@
   "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
   "bias": "none",
   "fan_in_fan_out": null,
-  "inference_mode":
+  "inference_mode": false,
   "init_lora_weights": true,
   "layers_pattern": null,
   "layers_to_transform": null,
@@ -14,13 +14,13 @@
   "r": 32,
   "revision": null,
   "target_modules": [
-    "k_proj",
-    "q_proj",
     "v_proj",
     "o_proj",
     "gate_proj",
+    "k_proj",
+    "up_proj",
     "down_proj",
-    "up_proj"
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
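The updated config targets all seven Llama projection matrices (q/k/v/o attention projections plus the gate/up/down MLP projections) at rank r=32; the diff only reorders the list and flips `inference_mode` to `false`. A minimal sketch of attaching the adapter to the 4-bit base model, assuming `peft` (the README pins 0.5.0.dev0) and a local copy of this repo; `adapter_dir` and the abbreviated quantization config are illustrative:

```python
# Sketch: attach this LoRA adapter to the 4-bit base model.
# "adapter_dir" is a hypothetical local path to this repo's files; the
# base-model path is the one recorded in adapter_config.json.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

adapter_dir = "."  # directory holding adapter_config.json / adapter_model.bin

base = AutoModelForCausalLM.from_pretrained(
    "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
    quantization_config=BitsAndBytesConfig(  # abbreviated; full values above
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    ),
    device_map="auto",
)
model = PeftModel.from_pretrained(base, adapter_dir)  # reads adapter_model.bin
```

`inference_mode: false` in the saved config is consistent with an autosave taken mid-training; when reloading, `PeftModel.from_pretrained` defaults to inference mode, so pass `is_trainable=True` if you intend to continue training the adapter.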
adapter_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:bb27235038e583f4bb42db82005cef114ed1580c627dd215688b950029824795
 size 500897101
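adapter_model.bin is stored via Git LFS, so this diff changes only the pointer file: `oid` is the SHA-256 of the actual weights and `size` is their byte count. A quick sketch of verifying a downloaded copy against the new pointer (the local path is hypothetical):

```python
# Sketch: verify a downloaded adapter_model.bin against the LFS pointer.
# "path" is a hypothetical local download location.
import hashlib
import os

path = "adapter_model.bin"
expected_oid = "bb27235038e583f4bb42db82005cef114ed1580c627dd215688b950029824795"
expected_size = 500897101

assert os.path.getsize(path) == expected_size, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected_oid, "oid mismatch"
print("adapter_model.bin matches the LFS pointer")
```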