{ "architectures": [ "QWenLMHeadModel" ], "auto_map": { "AutoConfig": "configuration_qwen.QWenConfig", "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel" }, "attn_dropout_prob": 0.0, "bf16": false, "emb_dropout_prob": 0.0, "fp16": true, "fp32": false, "hidden_size": 4096, "intermediate_size": 22016, "initializer_range": 0.02, "kv_channels": 128, "layer_norm_epsilon": 1e-06, "max_position_embeddings": 32768, "model_type": "qwen", "no_bias": true, "num_attention_heads": 32, "num_hidden_layers": 32, "onnx_safe": null, "quantization_config": { "bits": 4, "group_size": 128, "damp_percent": 0.01, "desc_act": false, "static_groups": false, "sym": true, "true_sequential": true, "model_name_or_path": null, "model_file_base_name": "model", "quant_method": "gptq" }, "rotary_emb_base": 10000, "rotary_pct": 1.0, "scale_attn_weights": true, "seq_length": 8192, "tie_word_embeddings": false, "tokenizer_class": "QWenTokenizer", "transformers_version": "4.32.0", "use_cache": true, "use_dynamic_ntk": true, "use_flash_attn": "auto", "use_triton": "auto", "use_logn_attn": true, "vocab_size": 151936 }