import os
from dataclasses import dataclass, field
from typing import Optional, Set

import onnx
from onnxruntime.quantization import (
    quantize_dynamic,
    QuantType
)
from optimum.exporters.tasks import TasksManager
from transformers import (
    AutoConfig,
    HfArgumentParser
)
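
# Default dynamic-quantization settings handed to onnxruntime. per_channel
# computes a separate scale for each weight channel; reduce_range quantizes
# weights to 7 bits, which can help accuracy on non-VNNI CPUs.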
DEFAULT_QUANTIZE_PARAMS = {
'per_channel': True,
'reduce_range': True,
}
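
# Per-model-type overrides of the defaults above (keyed by config.model_type).
# Whisper is quantized per-tensor with full-range weights; assumption: the
# per-channel/reduced-range settings cause accuracy or compatibility problems
# for whisper exports.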
MODEL_SPECIFIC_QUANTIZE_PARAMS = {
'whisper': {
'per_channel': False,
'reduce_range': False,
}
}
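
# Model types whose exports do not ship a tokenizer (wav2vec2 consumes raw
# audio via a feature extractor instead).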
MODELS_WITHOUT_TOKENIZERS = [
'wav2vec2'
]


@dataclass
class ConversionArguments:
"""
    Arguments used for converting HuggingFace models to ONNX.
"""
model_id: str = field(
metadata={
"help": "Model identifier"
}
)
quantize: bool = field(
default=False,
metadata={
"help": "Whether to quantize the model."
}
)
output_parent_dir: str = field(
default='./models/',
metadata={
"help": "Path where the converted model will be saved to."
}
)
task: Optional[str] = field(
default='auto',
metadata={
"help": (
"The task to export the model for. If not specified, the task will be auto-inferred based on the model. Available tasks depend on the model, but are among:"
f" {str(list(TasksManager._TASKS_TO_AUTOMODELS.keys()))}. For decoder models, use `xxx-with-past` to export the model using past key values in the decoder."
)
}
)
    opset: Optional[int] = field(
        default=None,
metadata={
"help": (
"If specified, ONNX opset version to export the model with. Otherwise, the default opset will be used."
)
}
)
device: str = field(
default='cpu',
metadata={
"help": 'The device to use to do the export.'
}
)
skip_validation: bool = field(
default=False,
metadata={
"help": "Whether to skip validation of the converted model"
}
)
    per_channel: Optional[bool] = field(
        default=None,
metadata={
"help": "Whether to quantize weights per channel"
}
)
    reduce_range: Optional[bool] = field(
        default=None,
        metadata={
            "help": "Whether to quantize weights with 7 bits. This may improve accuracy for some models running on non-VNNI machines, especially in per-channel mode."
}
)
output_attentions: bool = field(
default=False,
metadata={
"help": "Whether to output attentions from the model. NOTE: This is only supported for whisper models right now."
}
)
split_modalities: bool = field(
default=False,
metadata={
"help": "Whether to split multimodal models. NOTE: This is only supported for CLIP models right now."
}
)


def get_operators(model: onnx.ModelProto) -> Set[str]:
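    """
    Collect the set of ONNX operator types used anywhere in a model's graph,
    including operators nested inside control-flow subgraphs.

    Example (hypothetical path):
        ops = get_operators(onnx.load_model('model.onnx'))
        has_conv = 'Conv' in ops
    """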
operators = set()
def traverse_graph(graph):
for node in graph.node:
operators.add(node.op_type)
for attr in node.attribute:
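                # Control-flow ops such as If, Loop and Scan carry their bodies
                # as GRAPH attributes; recurse so their operators are included.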
if attr.type == onnx.AttributeProto.GRAPH:
subgraph = attr.g
traverse_graph(subgraph)
traverse_graph(model.graph)
return operators


def quantize(model_path, per_channel, reduce_range):
    """
    Quantize the weights of the model from float32 to int8 to allow very
    efficient inference on modern CPUs.

    Uses unsigned ints for activation values and signed ints for weights; per
    https://onnxruntime.ai/docs/performance/quantization.html#data-type-selection
    this is faster on most CPU architectures.

    Args:
        model_path: Path to the exported ONNX model.
        per_channel: Whether to quantize weights per channel.
        reduce_range: Whether to quantize weights to 7 bits.

    Returns:
        None. The quantized model is written to `model-q8.onnx` in the same
        directory as the input model.
    """
directory_path = os.path.dirname(model_path)
loaded_model = onnx.load_model(model_path)
op_types = get_operators(loaded_model)
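    # onnxruntime's dynamic quantization maps Conv to ConvInteger, whose CPU
    # kernel only supports uint8 weights, so use QUInt8 whenever the graph
    # contains Conv; QInt8 weights are generally faster otherwise.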
weight_type = QuantType.QUInt8 if 'Conv' in op_types else QuantType.QInt8
print("quantizing to", weight_type)
    quantize_dynamic(
        model_input=model_path,
        model_output=os.path.join(directory_path, 'model-q8.onnx'),
        weight_type=weight_type,
        per_channel=per_channel,
        reduce_range=reduce_range,
        optimize_model=False,
    )


def main():
    """
    Example usage (model_id should point at a local directory containing an
    exported model.onnx and its config.json):

        python quantize_onnx.py --model_id sentence-transformers/all-MiniLM-L6-v2-unquantized
    """
parser = HfArgumentParser(
(ConversionArguments,)
)
    conv_args, = parser.parse_args_into_dataclasses()
    model_id = conv_args.model_id

    # Resolve quantization parameters. Assumption: model-specific overrides are
    # looked up by config.model_type, explicit --per_channel/--reduce_range
    # flags take precedence, and DEFAULT_QUANTIZE_PARAMS is the fallback.
    config = AutoConfig.from_pretrained(model_id)
    quantize_params = MODEL_SPECIFIC_QUANTIZE_PARAMS.get(
        config.model_type, DEFAULT_QUANTIZE_PARAMS)
    per_channel = (conv_args.per_channel
                   if conv_args.per_channel is not None
                   else quantize_params['per_channel'])
    reduce_range = (conv_args.reduce_range
                    if conv_args.reduce_range is not None
                    else quantize_params['reduce_range'])

    quantize(os.path.join(model_id, "model.onnx"), per_channel, reduce_range)


if __name__ == '__main__':
main()