dhmeltzer committed
Commit bdd8b4d
1 Parent(s): 6ba5416

Upload LlamaForCausalLM

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "dhmeltzer/llama-7b-SFT_eli5_wiki65k_1024_r_64_alpha_16_merged",
+  "_name_or_path": "meta-llama/Llama-2-7b-hf",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -15,6 +15,18 @@
   "num_hidden_layers": 32,
   "num_key_value_heads": 32,
   "pretraining_tp": 1,
+  "quantization_config": {
+    "bnb_4bit_compute_dtype": "bfloat16",
+    "bnb_4bit_quant_type": "nf4",
+    "bnb_4bit_use_double_quant": true,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": null,
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  },
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
   "tie_word_embeddings": false,
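
The added "quantization_config" block records bitsandbytes 4-bit (NF4) settings serialized into the checkpoint. A minimal sketch of how those fields map onto transformers' BitsAndBytesConfig is below; the repo id is an assumption taken from the old "_name_or_path", and with a pre-quantized checkpoint like this one, from_pretrained should pick up the embedded config on its own, so passing it explicitly mainly documents the equivalent settings.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the new "quantization_config" entries in config.json.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # "load_in_4bit": true
    bnb_4bit_quant_type="nf4",              # "bnb_4bit_quant_type": "nf4"
    bnb_4bit_use_double_quant=True,         # "bnb_4bit_use_double_quant": true
    bnb_4bit_compute_dtype=torch.bfloat16,  # "bnb_4bit_compute_dtype": "bfloat16"
)

model = AutoModelForCausalLM.from_pretrained(
    "dhmeltzer/llama-7b-SFT_eli5_wiki65k_1024_r_64_alpha_16_merged",  # assumed repo id
    quantization_config=bnb_config,
    device_map="auto",
)
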
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:347476758ada3bd787682aae97dbfa725e81b0ee19258ddc2bcbf2f17326e0ba
+oid sha256:340064c9e2d4115ec1d94c54b66b58986f05113fe1544f82507be7a4d57c6171
 size 9976570520
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:422532b24a7b4deb1a1863aa691ac2c38421a4593a6237ebb3d42815a2637535
+oid sha256:af4dd88bdd4b800dc3dccde63c3c99cdba58601ad10189909eb13241b086358a
 size 3500294544
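
The two *.safetensors entries above are Git LFS pointers: only the "oid sha256:" digest and size change, while the real shard bytes live in LFS storage. A hedged sketch for verifying a downloaded shard against the new pointer; the local filename is assumed for illustration.

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks so a ~10 GB shard needn't fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Digest from the updated LFS pointer for shard 1 of 2.
expected = "340064c9e2d4115ec1d94c54b66b58986f05113fe1544f82507be7a4d57c6171"
assert sha256_of("model-00001-of-00002.safetensors") == expected
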