{ "one_external_file": true, "opset": null, "optimization": {}, "optimum_version": "1.19.0", "quantization": { "activations_dtype": "QUInt8", "activations_symmetric": false, "format": "QOperator", "is_static": false, "mode": "IntegerOps", "nodes_to_exclude": [], "nodes_to_quantize": [], "operators_to_quantize": [ "Conv", "MatMul", "Attention", "LSTM", "Gather", "Transpose", "EmbedLayerNormalization" ], "per_channel": true, "qdq_add_pair_to_weight": false, "qdq_dedicated_pair": false, "qdq_op_type_per_channel_support_to_axis": { "MatMul": 1 }, "reduce_range": false, "weights_dtype": "QUInt8", "weights_symmetric": true }, "transformers_version": "4.39.3", "use_external_data_format": false }