Upload folder using huggingface_hub (#3)
- 5a9c1b2a9a43e2cf12fc0c12abbfac6695bfda7b5183d5abc61390d80036790e (88d91b0d830871931d727068a35ad3b3be144c3b)
- 1a3cf829432d35b5ab5ff7e326a8db49db325db7a6451985f8806332e6a37529 (fa4ab1a44fc2fcdc84025de924b3942e1134d967)
- 98ba3df194d2ac1bb413b28d0618a6fddc17cc2377d1793229b8a4dc005864e4 (dbf24e7e3458854369ff9e0986339fe26a804ca8)
- README.md +2 -2
- config.json +2 -2
- plots.png +0 -0
- smash_config.json +1 -1
README.md
CHANGED
@@ -34,7 +34,7 @@ tags:
 
 ## Results
 
-
+![image info](./plots.png)
 
 **Frequently Asked Questions**
 - ***How does the compression work?*** The model is compressed with llm-int8.
@@ -61,7 +61,7 @@ You can run the smashed model with these steps:
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 model = AutoModelForCausalLM.from_pretrained("PrunaAI/GritLM-GritLM-7B-bnb-8bit-smashed",
-                                             trust_remote_code=True)
+                                             trust_remote_code=True, device_map='auto')
 tokenizer = AutoTokenizer.from_pretrained("GritLM/GritLM-7B")
 
 input_ids = tokenizer("What is the color of prunes?,", return_tensors='pt').to(model.device)["input_ids"]
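For context, the two README snippets touched by this hunk assemble into the end-to-end inference sketch below. Everything up to `input_ids` is taken verbatim from the diff; the `generate`/`decode` calls and the `max_new_tokens` value are illustrative assumptions, not part of this commit.

```python
# Minimal sketch assembling the model card's steps; the generation and
# decoding lines are assumptions, not part of the diff.
from transformers import AutoModelForCausalLM, AutoTokenizer

# device_map='auto' (the change in this commit) lets accelerate place
# the 8-bit weights on the available GPU(s) automatically.
model = AutoModelForCausalLM.from_pretrained(
    "PrunaAI/GritLM-GritLM-7B-bnb-8bit-smashed",
    trust_remote_code=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("GritLM/GritLM-7B")

input_ids = tokenizer("What is the color of prunes?,", return_tensors="pt").to(model.device)["input_ids"]

# Assumed continuation: decode a short greedy completion.
outputs = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```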
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/tmp/
+  "_name_or_path": "/tmp/tmp7ro6rtjc",
   "architectures": [
     "MistralForCausalLM"
   ],
@@ -29,7 +29,7 @@
   "quantization_config": {
     "bnb_4bit_compute_dtype": "bfloat16",
     "bnb_4bit_quant_type": "fp4",
-    "bnb_4bit_use_double_quant":
+    "bnb_4bit_use_double_quant": false,
     "llm_int8_enable_fp32_cpu_offload": false,
     "llm_int8_has_fp16_weight": false,
     "llm_int8_skip_modules": [
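For reference, the `quantization_config` block edited above corresponds roughly to the `transformers` `BitsAndBytesConfig` sketched below. This is a reconstruction from only the keys visible in the hunk; fields outside it (notably `load_in_8bit`) are assumptions inferred from the model being llm-int8 compressed.

```python
from transformers import BitsAndBytesConfig

# Assumed reconstruction of the config.json "quantization_config" above.
# load_in_8bit=True is inferred from the llm-int8 compression; the
# bnb_4bit_* fields are recorded in the config but inert in 8-bit mode.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,                       # assumption: llm-int8 model
    llm_int8_enable_fp32_cpu_offload=False,  # from the diff
    llm_int8_has_fp16_weight=False,          # from the diff
    bnb_4bit_compute_dtype="bfloat16",       # from the diff
    bnb_4bit_quant_type="fp4",               # from the diff
    bnb_4bit_use_double_quant=False,         # set to false in this commit
)
```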
plots.png
ADDED
smash_config.json
CHANGED
@@ -8,7 +8,7 @@
   "compilers": "None",
   "task": "text_text_generation",
   "device": "cuda",
-  "cache_dir": "/ceph/hdd/staff/charpent/.cache/
+  "cache_dir": "/ceph/hdd/staff/charpent/.cache/models3rsn8hei",
   "batch_size": 1,
   "model_name": "GritLM/GritLM-7B",
   "pruning_ratio": 0.0,
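As a quick way to inspect the compression settings this commit records, `smash_config.json` can be read like any JSON file; the sketch below references only keys visible in the hunk above.

```python
import json

# Inspect the Pruna smash settings recorded in this repo; only keys
# shown in the diff above are referenced here.
with open("smash_config.json") as f:
    cfg = json.load(f)

print(cfg["model_name"])     # "GritLM/GritLM-7B"
print(cfg["device"])         # "cuda"
print(cfg["task"])           # "text_text_generation"
print(cfg["pruning_ratio"])  # 0.0 (no pruning applied)
```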