AnishKumbhar committed on
Commit
13a1b4a
1 Parent(s): 8261e17

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -7
app.py CHANGED
@@ -12,11 +12,12 @@
12
  %cd GPTQ-for-LLaMa
13
  !python setup_cuda.py install
14
 
15
- hf-download https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/config.json --output-dir /content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ
16
- hf-download https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/generation_config.json --output-dir /content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ
17
- hf-download https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/special_tokens_map.json --output-dir /content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ
18
- hf-download https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/tokenizer.model --output-dir /content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ
19
- hf-download https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/tokenizer_config.json --output-dir /content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ
20
- hf-download https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/gptq_model-4bit-128g.safetensors --output-dir /content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ
21
- %cd /content/text-generation-webui
 
22
  !python server.py --share --chat --wbits 4 --groupsize 128 --model_type llama
 
12
  %cd GPTQ-for-LLaMa
13
  !python setup_cuda.py install
14
 
15
+ output_dir="/content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ"
16
+
17
+ wget -O "${output_dir}/config.json" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/config.json"
18
+ wget -O "${output_dir}/generation_config.json" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/generation_config.json"
19
+ wget -O "${output_dir}/special_tokens_map.json" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/special_tokens_map.json"
20
+ wget -O "${output_dir}/tokenizer.model" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/tokenizer.model"
21
+ wget -O "${output_dir}/tokenizer_config.json" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/tokenizer_config.json"
22
+ wget -O "${output_dir}/gptq_model-4bit-128g.safetensors" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/gptq_model-4bit-128g.safetensors"
+
+ %cd /content/text-generation-webui
23
  !python server.py --share --chat --wbits 4 --groupsize 128 --model_type llama