{ "builder_config": { "autopp_config": null, "gather_context_logits": false, "gather_generation_logits": false, "hidden_act": "swiglu", "hidden_size": 4096, "int8": false, "lora_target_modules": null, "max_batch_size": 8, "max_beam_width": 1, "max_input_len": 2048, "max_num_tokens": null, "max_output_len": 512, "max_position_embeddings": 32768, "max_prompt_embedding_table_size": 0, "mlp_hidden_size": 14336, "name": "llama", "num_heads": 32, "num_kv_heads": 8, "num_layers": 32, "parallel_build": true, "pipeline_parallel": 1, "precision": "bfloat16", "quant_mode": 0, "tensor_parallel": 2, "use_refit": false, "vocab_size": 32000 }, "plugin_config": { "attention_qk_half_accumulation": false, "bert_attention_plugin": false, "context_fmha_type": 1, "gemm_plugin": "bfloat16", "gpt_attention_plugin": "bfloat16", "identity_plugin": false, "layernorm_plugin": false, "layernorm_quantization_plugin": false, "lookup_plugin": false, "lora_plugin": false, "multi_block_mode": false, "nccl_plugin": "bfloat16", "paged_kv_cache": true, "quantize_per_token_plugin": false, "quantize_tensor_plugin": false, "remove_input_padding": true, "rmsnorm_plugin": false, "rmsnorm_quantization_plugin": false, "smooth_quant_gemm_plugin": false, "tokens_per_block": 128, "use_context_fmha_for_generation": false, "use_custom_all_reduce": false, "use_paged_context_fmha": false, "weight_only_groupwise_quant_matmul_plugin": false, "weight_only_quant_matmul_plugin": false } }