Upload folder using huggingface_hub
- README.md +117 -0
- config.json +42 -0
- generation_config.json +6 -0
- model.safetensors.index.json +0 -0
- output-00001-of-00012.safetensors +3 -0
- output-00002-of-00012.safetensors +3 -0
- output-00003-of-00012.safetensors +3 -0
- output-00004-of-00012.safetensors +3 -0
- output-00005-of-00012.safetensors +3 -0
- output-00006-of-00012.safetensors +3 -0
- output-00007-of-00012.safetensors +3 -0
- output-00008-of-00012.safetensors +3 -0
- output-00009-of-00012.safetensors +3 -0
- output-00010-of-00012.safetensors +3 -0
- output-00011-of-00012.safetensors +3 -0
- output-00012-of-00012.safetensors +3 -0
- special_tokens_map.json +30 -0
- tokenizer.model +3 -0
- tokenizer_config.json +44 -0
README.md
ADDED
@@ -0,0 +1,117 @@
---
language:
- en
license: apache-2.0
datasets:
- teknium/OpenHermes-2.5
base_model: mistral-community/Mixtral-8x22B-v0.1
tags:
- exl2
---

# mixtral-8x22b-instruct-oh - EXL2 5.5bpw

This is a 5.5bpw EXL2 quant of [fireworks-ai/mixtral-8x22b-instruct-oh](https://huggingface.co/fireworks-ai/mixtral-8x22b-instruct-oh).

Details about the model can be found at the above model page.

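One convenient way to pull this quant down is with `huggingface-cli`, which ships with huggingface_hub (the same tool used to upload this folder). This is only a sketch: `<repo-id>` is a placeholder for this repository's id on the Hub, and the target directory simply matches the `models/` layout the scripts below expect.

```bash
# Hypothetical download sketch; replace <repo-id> with this repository's id on the Hub.
pip install -U "huggingface_hub[cli]"
huggingface-cli download <repo-id> \
  --local-dir "models/mixtral-8x22b-instruct-oh_exl2_5.5bpw"
```
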
## EXL2 Version

These quants were made with exllamav2 version 0.0.18. Quants made on this version of EXL2 may not work on older versions of the exllamav2 library.

If you have problems loading these models, please update Text Generation WebUI to the latest version.

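As a quick sanity check that the quant loads at all, exllamav2's bundled `test_inference.py` (the same script used for perplexity testing below) can generate from a short prompt. This is a minimal sketch, assuming the quant sits in the `models/` directory used throughout this card; the `-gs` GPU split is copied from the perplexity script and should be adjusted to your hardware, and the `-p` prompt flag is assumed from exllamav2's usage rather than documented here.

```bash
# Minimal load-and-generate check; paths and GPU split are assumptions, adjust to your setup.
source ~/miniconda3/etc/profile.d/conda.sh
conda activate exllamav2

python test_inference.py \
  -m "models/mixtral-8x22b-instruct-oh_exl2_5.5bpw" \
  -gs 44,48 \
  -p $'USER: In one sentence, what is an EXL2 quant?\n\nASSISTANT:'
```
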
## Perplexity Scoring

Below are the perplexity scores for the EXL2 models. A lower score is better.

_TODO_

### Perplexity Script

This was the script used for perplexity testing.

```bash
#!/bin/bash

# Activate the conda environment
source ~/miniconda3/etc/profile.d/conda.sh
conda activate exllamav2

# Set the model name and bit size
MODEL_NAME="mixtral-8x22b-instruct-oh"
BIT_PRECISIONS=(7.0 6.0 5.5 5.0 4.5 4.0 3.5 3.0 2.75 2.5 2.25)

# Print the markdown table header
echo "| Quant Level | Perplexity Score |"
echo "|-------------|------------------|"

for BIT_PRECISION in "${BIT_PRECISIONS[@]}"
do
  MODEL_DIR="models/${MODEL_NAME}_exl2_${BIT_PRECISION}bpw"
  if [ -d "$MODEL_DIR" ]; then
    output=$(python test_inference.py -m "$MODEL_DIR" -gs 44,48 -ed data/wikitext/wikitext-2-v1.parquet)
    score=$(echo "$output" | grep -oP 'Evaluation perplexity: \K[\d.]+')
    echo "| $BIT_PRECISION | $score |"
  fi
done
```

## Quant Details

This is the script used for quantization.

```bash
#!/bin/bash

# Activate the conda environment
source ~/miniconda3/etc/profile.d/conda.sh
conda activate exllamav2

# Set the model name and bit size
MODEL_NAME="mixtral-8x22b-instruct-oh"

# Define variables
MODEL_DIR="models/$MODEL_NAME"
OUTPUT_DIR="exl2_$MODEL_NAME"
MEASUREMENT_FILE="measurements/$MODEL_NAME.json"

# Create the measurement file if needed
if [ ! -f "$MEASUREMENT_FILE" ]; then
  echo "Creating $MEASUREMENT_FILE"

  # Create directories
  if [ -d "$OUTPUT_DIR" ]; then
    rm -r "$OUTPUT_DIR"
  fi
  mkdir "$OUTPUT_DIR"

  python convert.py -i $MODEL_DIR -o $OUTPUT_DIR -nr -om $MEASUREMENT_FILE
fi

# Choose one of the below. Either create a single quant for testing or a batch of them.
# BIT_PRECISIONS=(2.25)
BIT_PRECISIONS=(7.0 6.0 5.5 5.0 4.5 4.0 3.5 3.0 2.75 2.5 2.25)

for BIT_PRECISION in "${BIT_PRECISIONS[@]}"
do
  CONVERTED_FOLDER="models/${MODEL_NAME}_exl2_${BIT_PRECISION}bpw"

  # If it doesn't already exist, make the quant
  if [ ! -d "$CONVERTED_FOLDER" ]; then

    echo "Creating $CONVERTED_FOLDER"

    # Create directories
    if [ -d "$OUTPUT_DIR" ]; then
      rm -r "$OUTPUT_DIR"
    fi
    mkdir "$OUTPUT_DIR"
    mkdir "$CONVERTED_FOLDER"

    # Run conversion commands
    python convert.py -i $MODEL_DIR -o $OUTPUT_DIR -nr -m $MEASUREMENT_FILE -b $BIT_PRECISION -cf $CONVERTED_FOLDER

  fi
done
```
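
The `quantization_config` recorded in config.json below (5.5 bits per weight, 6-bit head, 100 calibration rows of length 2048 on the default dataset) corresponds to a single pass of the loop above at `-b 5.5`. As a sketch of that one invocation in isolation: the `-hb` head-bits flag is an assumption about this exllamav2 version's convert.py interface (the batch script above leaves head bits at their default), so check `python convert.py -h` before relying on it.

```bash
# Hypothetical single-quant run matching the recorded quantization_config.
# -hb (head bits) is assumed to exist in this exllamav2 version; verify with `python convert.py -h`.
MODEL_NAME="mixtral-8x22b-instruct-oh"
python convert.py \
  -i "models/$MODEL_NAME" \
  -o "exl2_$MODEL_NAME" \
  -nr \
  -m "measurements/$MODEL_NAME.json" \
  -b 5.5 \
  -hb 6 \
  -cf "models/${MODEL_NAME}_exl2_5.5bpw"
```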
config.json
ADDED
@@ -0,0 +1,42 @@
{
  "_name_or_path": "mistral-community/Mixtral-8x22B-v0.1",
  "architectures": [
    "MixtralForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 6144,
  "initializer_range": 0.02,
  "intermediate_size": 16384,
  "max_position_embeddings": 65536,
  "model_type": "mixtral",
  "num_attention_heads": 48,
  "num_experts_per_tok": 2,
  "num_hidden_layers": 56,
  "num_key_value_heads": 8,
  "num_local_experts": 8,
  "output_router_logits": false,
  "rms_norm_eps": 1e-05,
  "rope_theta": 1000000,
  "router_aux_loss_coef": 0.001,
  "router_jitter_noise": 0.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.39.3",
  "use_cache": true,
  "vocab_size": 32000,
  "quantization_config": {
    "quant_method": "exl2",
    "version": "0.0.18",
    "bits": 5.5,
    "head_bits": 6,
    "calibration": {
      "rows": 100,
      "length": 2048,
      "dataset": "(default)"
    }
  }
}
generation_config.json
ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.39.3"
}
model.safetensors.index.json
ADDED
The diff for this file is too large to render.
output-00001-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:405e6d0ea391f4d86af889a3ba54eee11445cc56ff8d9be73277819ea634cdad
size 8590074272
output-00002-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9eb35b3db7a65213baa44a513af6f55fb1a229b36f0c970b289420d87b936818
size 8589661744
output-00003-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:66a11d02597f71a1c8aed385de2b14a9cfdc6443644f4a2d4385494bee91d96d
size 8590025576
output-00004-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cd21041f485aa3ad1a985295036c39925abbc65ecadb47d5e5679e3cf0bb3b81
size 8566465896
output-00005-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3654e0837a793236e37423c7fc2064578c25cfa76dea96fb0e792449f46547b9
size 8573119224
output-00006-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c57f87fe9f317a15e4e07b11de70230b09d46ca94e95dc45603f59898453cb21
size 8545072456
output-00007-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:25efeac38d979df49b7828e2c564021202471c20e013c02fc633e97932c3c322
size 8522328432
output-00008-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1eed7bc528c486e1251473a3356fc68eb78cfd142ff62be67f2ab84767aeca71
size 8545072472
output-00009-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1b1ec9d04088454a60a3901df818a02f48c1716724e40d53d6fd8ea72446bd8d
size 8522328432
output-00010-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:97111da7a7f5a66ca9fdb76bfdf8308315bf13d1704f76599e2dd3ab57c497fa
size 8531766616
output-00011-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9d045951be8a9b1e779bfa18c3c5ff328f50a61088bb3346e1348fbb338d344b
size 8534706272
output-00012-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8a2186fe78839bfc40b1e28f06d0bad4771a60926dd4dd1a618a5ef9fd50068a
size 2845467632
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
size 493443
tokenizer_config.json
ADDED
@@ -0,0 +1,44 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": true,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "chat_template": "{%- set message_roles = ['USER', 'ASSISTANT'] -%}\n{%- set ns = namespace(initial_system_message_handled=false, messages=messages) -%}\n{{ bos_token }}\n{%- for message in ns.messages -%}\n {%- if message['role'] | upper == 'SYSTEM' and not ns.initial_system_message_handled -%}\n {%- set ns.initial_system_message_handled = true -%}\n {{'SYSTEM: ' + message['content'] }}\n {%- elif message['role'] | upper != 'SYSTEM' -%}\n {%- if (message['role'] | upper == 'USER') != ((loop.index0 - (1 if ns.initial_system_message_handled else 0)) % 2 == 0) -%}\n {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif -%}\n {%- if loop.index0 > 0 or ns.initial_system_message_handled -%}\n {{ '\\n\\n' }}\n {%- endif -%}\n {%- if message['role'] | upper == 'USER' -%}\n {{ 'USER: ' + message['content'] }}\n {%- elif message['role'] | upper == 'ASSISTANT' -%}\n {{ 'ASSISTANT: ' + message['content'] + eos_token}}\n {%- endif -%}\n {%- endif -%}\n{%- endfor -%}\n",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": false,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<unk>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
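
For readers of the raw diff: the `chat_template` above implements a Vicuna-style format with an optional leading system message followed by strictly alternating USER/ASSISTANT turns, where each assistant turn is closed by the EOS token. Traced by hand from the template rather than taken from upstream documentation, a three-message conversation should serialize roughly as:

```
<s>SYSTEM: {system prompt}

USER: {user message}

ASSISTANT: {assistant reply}</s>
```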