# Source provenance (from extraction): 489 bytes, commit 1bfb947
---
quant_stage:
  quant_modifiers:
    # SmoothQuant: shifts activation outliers into the preceding weights so the
    # subsequent weight/activation quantization loses less accuracy.
    SmoothQuantModifier:
      smoothing_strength: 0.7
      # Each entry pairs a group of projection layers (regex-matched) with the
      # layer whose output feeds them and absorbs the smoothing scales.
      mappings:
        - - ['re:.*q_proj', 're:.*k_proj', 're:.*v_proj']
          - 're:.*input_layernorm'
        - - ['re:.*gate_proj', 're:.*up_proj']
          - 're:.*post_attention_layernorm'
        - - ['re:.*down_proj']
          - 're:.*up_proj'
    # GPTQ: 8-bit weight and activation (W8A8) quantization of all Linear
    # layers except the output head.
    GPTQModifier:
      sequential_update: true
      dampening_frac: 0.01
      ignore: [lm_head]
      scheme: W8A8
      targets: [Linear]
      observer: mse