# vicuna/models/config.yaml
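# Each top-level key below is a regular expression matched against the model
# name; the settings from every matching entry are applied to that model.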
.*:
  wbits: 'None'
  model_type: 'None'
  groupsize: 'None'
  pre_layer: 0
  mode: 'cai-chat'
  skip_special_tokens: true
  custom_stopping_strings: ''
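# 4-bit LLaMA checkpoints without a group size in the name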
llama-[0-9]*b-4bit$:
  wbits: 4
  model_type: 'llama'
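# Checkpoints whose names advertise 4-bit or 3-bit quantization with a
# group size of 128, in either naming order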
.*-(4bit|int4)-(gr128|128g):
  wbits: 4
  groupsize: 128
.*-(gr128|128g)-(4bit|int4):
  wbits: 4
  groupsize: 128
.*-3bit-(gr128|128g):
  wbits: 3
  groupsize: 128
.*-(gr128|128g)-3bit:
  wbits: 3
  groupsize: 128
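# Instruction-tuned models: use instruct mode with the matching prompt template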
.*oasst-sft-1-pythia-12b:
  mode: 'instruct'
  instruction_template: 'Open Assistant'
.*vicuna:
  mode: 'instruct'
  instruction_template: 'Vicuna'
.*alpaca:
  mode: 'instruct'
  instruction_template: 'Alpaca'
.*alpaca-native-4bit:
  mode: 'instruct'
  instruction_template: 'Alpaca'
  wbits: 4
  groupsize: 128
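# Galactica and OpenAssistant models keep special tokens in their output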
.*(galactica|oasst):
  skip_special_tokens: false
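# Dolly models use the Alpaca template and stop at the "### End" marker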
.*dolly-v[0-9]-[0-9]*b:
  mode: 'instruct'
  instruction_template: 'Alpaca'
  skip_special_tokens: false
  custom_stopping_strings: '"### End"'
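# Remaining instruct-mode families; LLaVA is additionally loaded with the
# llama model type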
.*koala:
  mode: 'instruct'
  instruction_template: 'Koala'
.*chatglm:
  mode: 'instruct'
  instruction_template: 'ChatGLM'
.*llava:
  mode: 'instruct'
  model_type: 'llama'
  instruction_template: 'LLaVA'