{
"builder_config": {
"gather_all_token_logits": false,
"hidden_act": "swiglu",
"hidden_size": 4096,
"int8": false,
"lora_target_modules": [],
"max_batch_size": 8,
"max_beam_width": 1,
"max_input_len": 2048,
"max_num_tokens": null,
"max_output_len": 512,
"max_position_embeddings": 32768,
"max_prompt_embedding_table_size": 0,
"name": "llama",
"num_heads": 32,
"num_kv_heads": 8,
"num_layers": 32,
"parallel_build": true,
"pipeline_parallel": 1,
"precision": "float16",
"quant_mode": 0,
"tensor_parallel": 2,
"use_refit": false,
"vocab_size": 32000
},
"plugin_config": {
"attention_qk_half_accumulation": false,
"bert_attention_plugin": false,
"context_fmha_type": 1,
"gemm_plugin": "float16",
"gpt_attention_plugin": "float16",
"identity_plugin": false,
"layernorm_plugin": false,
"layernorm_quantization_plugin": false,
"lookup_plugin": false,
"lora_plugin": false,
"multi_block_mode": false,
"nccl_plugin": "float16",
"paged_kv_cache": true,
"quantize_per_token_plugin": false,
"quantize_tensor_plugin": false,
"remove_input_padding": true,
"rmsnorm_plugin": false,
"rmsnorm_quantization_plugin": false,
"smooth_quant_gemm_plugin": false,
"tokens_per_block": 128,
"use_context_fmha_for_generation": false,
"use_custom_all_reduce": false,
"use_paged_context_fmha": false,
"weight_only_groupwise_quant_matmul_plugin": false,
"weight_only_quant_matmul_plugin": false
}
}
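
For reference, a minimal sketch of how this config.json could be inspected with plain Python; the engine directory path and the derived per-rank figures are illustrative assumptions, not part of the original file:

# inspect_config.py -- minimal sketch; the path "engine_dir/config.json" is an assumption
import json

with open("engine_dir/config.json") as f:
    cfg = json.load(f)

builder = cfg["builder_config"]
plugins = cfg["plugin_config"]

# Grouped-query attention: 32 query heads share 8 KV heads (4 query heads per KV head)
print("query heads:", builder["num_heads"])
print("kv heads   :", builder["num_kv_heads"])

# With tensor_parallel = 2, the heads are split across ranks: 4 KV heads per GPU
tp = builder["tensor_parallel"]
print("kv heads per TP rank:", builder["num_kv_heads"] // tp)

# Paged KV cache is enabled and allocated in blocks of 128 tokens
print("paged_kv_cache:", plugins["paged_kv_cache"],
      "tokens_per_block:", plugins["tokens_per_block"])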