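"""Presets for context-sensitive generation models.

Each function returns a tuple of settings (model name, prompt templates with
{context}/{current} placeholders, and JSON-encoded kwargs); each field's role
is given by the inline comment next to it.
"""

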
def set_cora_preset():
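    """Preset for the CORA multilingual answer generator, formatting the current question (<Q>) and a context passage (<P>)."""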
    return (
        "gsarti/cora_mgen",  # model_name_or_path
        "<Q>:{current} <P>:{context}",  # input_template
        "<Q>:{current}",  # input_current_text_template
    )


def set_default_preset():
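    """Default preset: GPT-2 with plain-text templates, no special tokens, and empty JSON strings for all kwargs fields."""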
    return (
        "gpt2",  # model_name_or_path
        "{current} {context}",  # input_template
        "{current}",  # input_current_template
        "{current}",  # output_template
        [],  # special_tokens_to_keep
        "",  # decoder_input_output_separator
        "{}",  # model_kwargs
        "{}",  # tokenizer_kwargs
        "{}",  # generation_kwargs
        "{}",  # attribution_kwargs
    )


def set_zephyr_preset():
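    """Preset for StableLM 2 Zephyr 1.6B, placing the context in the system turn of its chat template."""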
    return (
        "stabilityai/stablelm-2-zephyr-1_6b",  # model_name_or_path
        "<|system|>\n{context}</s>\n<|user|>\n{current}</s>\n<|assistant|>\n",  # input_template
        "<|user|>\n{current}</s>\n<|assistant|>\n",  # input_current_text_template
        "\n",  # decoder_input_output_separator
    )


def set_chatml_preset():
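    """Preset for Qwen1.5 0.5B Chat using the ChatML format, keeping its turn delimiters as special tokens."""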
    return (
        "Qwen/Qwen1.5-0.5B-Chat",  # model_name_or_path
        "<|im_start|>system\n{context}<|im_end|>\n<|im_start|>user\n{current}<|im_end|>\n<|im_start|>assistant\n",  # input_template
        "<|im_start|>user\n{current}<|im_end|>\n<|im_start|>assistant\n",  # input_current_text_template
        "",  # decoder_input_output_separator
        ["<|im_start|>", "<|im_end|>"],  # special_tokens_to_keep
    )


def set_mmt_preset():
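    """Preset for mBART-50 one-to-many multilingual MT, configured via tokenizer kwargs for English-to-French translation."""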
    return (
        "facebook/mbart-large-50-one-to-many-mmt",  # model_name_or_path
        "{context} {current}",  # input_template
        "{context} {current}",  # output_template
        '{\n\t"src_lang": "en_XX",\n\t"tgt_lang": "fr_XX"\n}',  # tokenizer_kwargs
    )


def set_towerinstruct_preset():
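    """Preset for TowerInstruct-7B v0.1, prompting a translation into French that can draw on the provided context."""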
    return (
        "Unbabel/TowerInstruct-7B-v0.1",  # model_name_or_path
        "<|im_start|>user\nSource: {current}\nContext: {context}\nTranslate the above text into French. Use the context to guide your answer.\nTarget:<|im_end|>\n<|im_start|>assistant\n",  # input_template
        "<|im_start|>user\nSource: {current}\nTranslate the above text into French.\nTarget:<|im_end|>\n<|im_start|>assistant\n",  # input_current_text_template
        "",  # decoder_input_output_separator
        ["<|im_start|>", "<|im_end|>"],  # special_tokens_to_keep
    )


def set_gemma_preset():
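    """Preset for Gemma 2B Instruct using its <start_of_turn>/<end_of_turn> chat format."""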
    return (
        "google/gemma-2b-it", # model_name_or_path
        "<start_of_turn>user\n{context}\n{current}<end_of_turn>\n<start_of_turn>model\n", # input_template
        "<start_of_turn>user\n{current}<end_of_turn>\n<start_of_turn>model\n", # input_current_text_template
        "", # decoder_input_output_separator
        ["<start_of_turn>", "<end_of_turn>"], # special_tokens_to_keep
    )
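

if __name__ == "__main__":
    # Minimal usage sketch (an illustration, not part of the original module):
    # unpack a preset and render its templates with sample values. How the
    # consuming app maps preset fields onto its settings is assumed, not shown.
    model_name_or_path, input_template, input_current_text_template = set_cora_preset()
    print(f"Model: {model_name_or_path}")
    print(input_template.format(current="What is the capital of France?", context="Paris is the capital of France."))
    print(input_current_text_template.format(current="What is the capital of France?"))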