Commit afcd60a by sharpenb
1 Parent(s): afec80f

Upload folder using huggingface_hub (#2)


- cbdeeef929568dd359c966d46dd8cdd1e3602db4c76fd6ec5d10d3d2c3900ec0 (1fedf4863225870a37da887bf0da459bb9fa70aa)

Files changed (2)
  1. README.md +1 -1
  2. smash_config.json +5 -5
README.md CHANGED
@@ -31,7 +31,7 @@ tags:
  - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
  - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
  - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
- - Join Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
 
  ## Results
 
smash_config.json CHANGED
@@ -2,19 +2,19 @@
   "api_key": null,
   "verify_url": "http://johnrachwan.pythonanywhere.com",
   "smash_config": {
-    "pruners": "[]",
+    "pruners": "None",
     "pruning_ratio": 0.0,
-    "factorizers": "[]",
+    "factorizers": "None",
     "quantizers": "['hqq']",
     "weight_quantization_bits": 4,
-    "output_deviation": 0.01,
-    "compilers": "[]",
+    "output_deviation": 0.005,
+    "compilers": "None",
     "static_batch": true,
     "static_shape": true,
     "controlnet": "None",
     "unet_dim": 4,
     "device": "cuda",
-    "cache_dir": "/ceph/hdd/staff/charpent/.cache/modelsd8gqf7ii",
+    "cache_dir": "/ceph/hdd/staff/charpent/.cache/modelsiwkjmpnv",
     "batch_size": 1,
     "model_name": "apple/OpenELM-270M-Instruct",
     "task": "text_text_generation",