sharpenb committed (verified)
Commit a99c0b4 · 1 Parent(s): 6e4ab1f

Upload folder using huggingface_hub (#2)


- db75fa1b1119a2a08f947751c2cdac8c13c66105da425b0db771df46af9ecb8f (7217d07b7882efea212db248dc3db2765dfad011)
- 4e2d9a2c5e2dc3b616bc8e0dd88618e0a5fc6230e366cc5a9a65561a79d435c1 (db11ce7b69d894bdb65536cc49bc628a3ede2fb0)
- 01c404208d582e6c388907f620356470d7d10f28d95263de47d8a7f3deb6f948 (ea2ff4810129cc8a5f2524914ef440bb5c6d9875)

Files changed (2):
  1. config.json +1 -1
  2. smash_config.json +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmpso_kvfb0tzww5ohy",
+  "_name_or_path": "/covalent/.cache/models/tmpe2_nve5e77cbpf3a",
   "architectures": [
     "LlamaForCausalLM"
   ],
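The only change in config.json is the internal "_name_or_path", a temporary cache path written at export time; it has no effect when the model is loaded from the Hub. As a minimal sketch of reading this config with transformers (the repo id "PrunaAI/llama-smashed" below is a hypothetical placeholder for this repository):

    from transformers import AutoConfig

    # Hypothetical repo id; substitute the actual Hub repository this commit belongs to.
    repo_id = "PrunaAI/llama-smashed"

    config = AutoConfig.from_pretrained(repo_id)
    print(config.architectures)  # ['LlamaForCausalLM'], as declared in config.json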
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmpso_kvfb0",
+  "cache_dir": "/covalent/.cache/models/tmpe2_nve5e",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {}