CryogenicPlanet committed on
Commit de8e868
1 Parent(s): d510be1

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -20,3 +20,5 @@ TP_w2/llama_float16_tp2_rank0.engine filter=lfs diff=lfs merge=lfs -text
 TP_w2/llama_float16_tp2_rank1.engine filter=lfs diff=lfs merge=lfs -text
 PP_w2/llama_float16_tp1_pp2_rank0.engine filter=lfs diff=lfs merge=lfs -text
 PP_w2/llama_float16_tp1_pp2_rank1.engine filter=lfs diff=lfs merge=lfs -text
+PP_bf16_w2/llama_bfloat16_tp2_rank0.engine filter=lfs diff=lfs merge=lfs -text
+PP_bf16_w2/llama_bfloat16_tp2_rank1.engine filter=lfs diff=lfs merge=lfs -text
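The two added rules route the new bfloat16 engine shards through Git LFS, so the repository stores small pointer files while the multi-gigabyte binaries live in LFS storage. As a rough, hypothetical illustration of how such patterns resolve against paths (Python's fnmatch standing in for Git's wildmatch; not part of this commit):

```python
import fnmatch

# Patterns and attributes copied from the .gitattributes diff above.
LFS_RULES = {
    "PP_bf16_w2/llama_bfloat16_tp2_rank0.engine": "filter=lfs diff=lfs merge=lfs -text",
    "PP_bf16_w2/llama_bfloat16_tp2_rank1.engine": "filter=lfs diff=lfs merge=lfs -text",
}

def is_lfs_tracked(path: str) -> bool:
    """True if any rule marks `path` as LFS-filtered (approximate matcher)."""
    return any(
        fnmatch.fnmatch(path, pattern) and "filter=lfs" in attrs
        for pattern, attrs in LFS_RULES.items()
    )

assert is_lfs_tracked("PP_bf16_w2/llama_bfloat16_tp2_rank0.engine")
assert not is_lfs_tracked("PP_bf16_w2/config.json")  # config stays a normal git blob
```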
PP_bf16_w2/config.json ADDED
@@ -0,0 +1,57 @@
+{
+  "builder_config": {
+    "autopp_config": null,
+    "gather_context_logits": false,
+    "gather_generation_logits": false,
+    "hidden_act": "swiglu",
+    "hidden_size": 4096,
+    "int8": false,
+    "lora_target_modules": null,
+    "max_batch_size": 8,
+    "max_beam_width": 1,
+    "max_input_len": 2048,
+    "max_num_tokens": null,
+    "max_output_len": 512,
+    "max_position_embeddings": 32768,
+    "max_prompt_embedding_table_size": 0,
+    "mlp_hidden_size": 14336,
+    "name": "llama",
+    "num_heads": 32,
+    "num_kv_heads": 8,
+    "num_layers": 32,
+    "parallel_build": true,
+    "pipeline_parallel": 1,
+    "precision": "bfloat16",
+    "quant_mode": 0,
+    "tensor_parallel": 2,
+    "use_refit": false,
+    "vocab_size": 32000
+  },
+  "plugin_config": {
+    "attention_qk_half_accumulation": false,
+    "bert_attention_plugin": false,
+    "context_fmha_type": 1,
+    "gemm_plugin": "bfloat16",
+    "gpt_attention_plugin": "bfloat16",
+    "identity_plugin": false,
+    "layernorm_plugin": false,
+    "layernorm_quantization_plugin": false,
+    "lookup_plugin": false,
+    "lora_plugin": false,
+    "multi_block_mode": false,
+    "nccl_plugin": "bfloat16",
+    "paged_kv_cache": true,
+    "quantize_per_token_plugin": false,
+    "quantize_tensor_plugin": false,
+    "remove_input_padding": true,
+    "rmsnorm_plugin": false,
+    "rmsnorm_quantization_plugin": false,
+    "smooth_quant_gemm_plugin": false,
+    "tokens_per_block": 128,
+    "use_context_fmha_for_generation": false,
+    "use_custom_all_reduce": false,
+    "use_paged_context_fmha": false,
+    "weight_only_groupwise_quant_matmul_plugin": false,
+    "weight_only_quant_matmul_plugin": false
+  }
+}
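For orientation: despite the folder's PP_ prefix, the builder config records a 2-way tensor-parallel build (tensor_parallel: 2, pipeline_parallel: 1) of a bfloat16 Llama-family model with grouped-query attention (32 query heads sharing 8 KV heads) and a paged KV cache of 128 tokens per block. A back-of-the-envelope sketch of the KV-cache footprint these numbers imply (my arithmetic from the config values, not data stored in this repo):

```python
# Values copied from PP_bf16_w2/config.json above.
num_layers = 32
num_heads = 32          # query heads
num_kv_heads = 8        # grouped-query attention: 4 query heads per KV head
hidden_size = 4096
tokens_per_block = 128  # paged_kv_cache block size
tensor_parallel = 2
bytes_per_elem = 2      # bfloat16

head_dim = hidden_size // num_heads  # 4096 / 32 = 128

# K and V each store num_kv_heads * head_dim values per layer per token.
kv_bytes_per_token = 2 * num_layers * num_kv_heads * head_dim * bytes_per_elem
print(kv_bytes_per_token)            # 131072 -> 128 KiB per token across all ranks

# KV heads are sharded over the TP ranks, so each GPU holds half of every block.
block_bytes_per_rank = kv_bytes_per_token * tokens_per_block // tensor_parallel
print(block_bytes_per_rank / 2**20)  # 8.0 MiB per 128-token block per GPU
```

At the configured maximum of max_input_len + max_output_len = 2560 tokens, a single sequence can occupy 20 blocks, roughly 160 MiB of KV cache per GPU, before any paging savings.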
PP_bf16_w2/llama_bfloat16_tp2_rank0.engine ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dde3d1fffd5cfba0025ff2a577f3e80bf89c9ac2561ca24a519fae22854d3b7
+size 46840558068
PP_bf16_w2/llama_bfloat16_tp2_rank1.engine ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac580b33de02b9d2d0696972e4121cd1649c2fd2438cd9af51bd1df82c0b758f
+size 46840557492
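Each .engine file is committed only as a Git LFS pointer: the actual ~46.8 GB binary is addressed by its SHA-256 oid and byte size. A minimal, stdlib-only sketch for verifying a downloaded engine against its pointer (the local path is a placeholder):

```python
import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Stream a file and compare it to a Git LFS pointer's size and sha256 oid."""
    if os.path.getsize(path) != expected_size:
        return False          # cheap check first; a truncated download fails here
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # 8 MiB chunks: these engines are ~46.8 GB, far too large for one read().
        for chunk in iter(lambda: f.read(8 * 1024 * 1024), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Pointer values from the rank0 entry above; the path assumes a local clone.
ok = verify_lfs_object(
    "PP_bf16_w2/llama_bfloat16_tp2_rank0.engine",
    "6dde3d1fffd5cfba0025ff2a577f3e80bf89c9ac2561ca24a519fae22854d3b7",
    46840558068,
)
print("checksum ok" if ok else "mismatch: re-fetch the LFS object")
```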
PP_bf16_w2/model.cache ADDED
Binary file (262 kB).