Abhinav Kulkarni committed on
Commit
1b396be
1 Parent(s): b5dc008

Updated README

Files changed (1)
  1. README.md +5 -5
README.md CHANGED
@@ -30,9 +30,9 @@ For Docker users, the `nvcr.io/nvidia/pytorch:23.06-py3` image is runtime v12.1
  ## How to Use
  
  ```bash
- git clone https://github.com/mit-han-lab/llm-awq \
+ git clone https://github.com/abhinavkulkarni/llm-awq \
  && cd llm-awq \
- && git checkout 71d8e68df78de6c0c817b029a568c064bf22132d \
+ && git checkout e977c5a570c5048b67a45b1eb823b81de02d0d60 \
  && pip install -e . \
  && cd awq/kernels \
  && python setup.py install
@@ -43,9 +43,9 @@ import torch
  from awq.quantize.quantizer import real_quantize_model_weight
  from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer
  from accelerate import init_empty_weights, load_checkpoint_and_dispatch
- from huggingface_hub import hf_hub_download
+ from huggingface_hub import snapshot_download
  
- model_name = "mosaicml/mpt-7b-chat"
+ model_name = "abhinavkulkarni/mosaicml-mpt-7b-chat-w4-g128-awq"
  
  # Config
  config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
@@ -60,7 +60,7 @@ q_config = {
      "q_group_size": 128,
  }
  
- load_quant = hf_hub_download('abhinavkulkarni/mosaicml-mpt-7b-chat-w4-g128-awq', 'pytorch_model.bin')
+ load_quant = snapshot_download(model_name)
  
  with init_empty_weights():
      model = AutoModelForCausalLM.from_config(config=config,
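
For context, the hunks above show only the lines touched by this commit. Below is a minimal sketch of how the updated README snippet fits together end to end, assuming the usual llm-awq loading flow; anything not visible in the diff (the `w_bit` value, the `zero_point` key, the `torch_dtype`/`trust_remote_code` arguments to `from_config`, the `real_quantize_model_weight(..., init_only=True)` call, the `device_map` choice, and the omission of tokenizer setup) is an assumption, not part of the commit.

```python
# Minimal end-to-end sketch of the updated snippet. Only the lines shown in the
# diff above are confirmed by this commit; the rest is an assumed reconstruction.
import torch
from awq.quantize.quantizer import real_quantize_model_weight
from transformers import AutoModelForCausalLM, AutoConfig
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
from huggingface_hub import snapshot_download

model_name = "abhinavkulkarni/mosaicml-mpt-7b-chat-w4-g128-awq"

# Config
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)

# Quantization config: q_group_size comes from the diff; w_bit and zero_point are assumed
w_bit = 4
q_config = {
    "zero_point": True,
    "q_group_size": 128,
}

# Download the full quantized model repository from the Hub (returns a local folder path)
load_quant = snapshot_download(model_name)

# Build the model on the meta device, swap in quantized layer placeholders,
# then load the real quantized weights and dispatch them onto available devices
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(
        config=config, torch_dtype=torch.float16, trust_remote_code=True
    )
real_quantize_model_weight(model, w_bit=w_bit, q_config=q_config, init_only=True)

model = load_checkpoint_and_dispatch(model, load_quant, device_map="auto")
model.eval()
```

The practical effect of switching from `hf_hub_download` to `snapshot_download` is that the whole model repository is fetched rather than a single `pytorch_model.bin`, so `load_quant` becomes a directory path; `load_checkpoint_and_dispatch` accepts a checkpoint folder as well as a single file, so the downloaded snapshot can typically be passed to it directly.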