# Source paste artifact removed (viewer banner: 124 bytes, commit 14a1fe3).
# Quantization recipe stage (llm-compressor / compressed-tensors style).
quant_stage:
  quant_modifiers:
    QuantizationModifier:
      # Modules excluded from quantization; lm_head is kept unquantized.
      ignore: [lm_head]
      # Module types the modifier applies to (all Linear layers).
      targets: [Linear]
      # Preset scheme name — presumably the library's FP8 preset
      # (8-bit float weights/activations); confirm against the
      # consuming tool's scheme registry.
      scheme: FP8