all-MiniLM-L6-v2-onnx / quantize_config.json
{
  "per_channel": true,
  "reduce_range": true,
  "per_model_config": {
    "model": {
      "op_types": [
        "Erf",
        "Transpose",
        "Slice",
        "Reshape",
        "Unsqueeze",
        "Sub",
        "Constant",
        "Sqrt",
        "MatMul",
        "Cast",
        "Mul",
        "Div",
        "Add",
        "Softmax",
        "Concat",
        "Gather",
        "Shape",
        "ReduceMean",
        "Pow"
      ],
      "weight_type": "QInt8"
    }
  }
}
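The top-level fields mirror arguments accepted by onnxruntime's dynamic quantization API, and the per-model "op_types" list appears to record the operator types present in the exported graph. Below is a minimal sketch, not the repo's actual quantization script, of how settings like these could be applied with onnxruntime; the model paths are hypothetical.

```python
# Sketch: dynamic quantization with settings matching quantize_config.json.
# Paths ("model.onnx", "model_quantized.onnx") are assumed, not from the repo.
from onnxruntime.quantization import QuantType, quantize_dynamic

quantize_dynamic(
    model_input="model.onnx",             # float32 ONNX export (assumed path)
    model_output="model_quantized.onnx",  # quantized output (assumed path)
    per_channel=True,                     # "per_channel": true
    reduce_range=True,                    # "reduce_range": true
    weight_type=QuantType.QInt8,          # "weight_type": "QInt8"
)
```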