Minor fixes to presets
- app.py +3 -2
- presets.py +3 -2
app.py
CHANGED
@@ -80,8 +80,8 @@ def pecore(
     loaded_model = HuggingfaceModel.load(
         model_name_or_path,
         attribution_method,
-        model_kwargs=json.loads(model_kwargs),
-        tokenizer_kwargs=json.loads(tokenizer_kwargs),
+        model_kwargs={**json.loads(model_kwargs), **{"token": os.environ["HF_TOKEN"]}},
+        tokenizer_kwargs={**json.loads(tokenizer_kwargs), **{"token": os.environ["HF_TOKEN"]}},
     )
     if loaded_model.tokenizer.pad_token is None:
         loaded_model.tokenizer.add_special_tokens({"pad_token": "[PAD]"})
@@ -629,6 +629,7 @@ with gr.Blocks(css=custom_css) as demo:
         outputs=[
             model_name_or_path,
             input_template,
+            decoder_input_output_separator,
             contextless_input_template,
             special_tokens_to_keep,
             generation_kwargs,
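Note: the updated calls fold the Space's Hub token into whatever the user typed into the kwargs boxes. A minimal sketch of the merge pattern in isolation (the torch_dtype value is illustrative; HF_TOKEN is assumed to be set in the environment, as the diff implies):

    import json
    import os

    # JSON string typed into the Gradio textbox, as in app.py
    model_kwargs = '{"torch_dtype": "float16"}'

    # Merge the parsed kwargs with the Hub token; the right-hand dict wins on
    # key collisions, so an explicit "token" in the JSON would be overridden.
    merged = {**json.loads(model_kwargs), **{"token": os.environ["HF_TOKEN"]}}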
presets.py
CHANGED
@@ -29,8 +29,9 @@ def set_default_preset():
 def set_zephyr_preset():
     return (
         "stabilityai/stablelm-2-zephyr-1_6b",  # model_name_or_path
-        "<|system|>{system_prompt}<|endoftext|>\n<|user|>\n{context}\n\n{current}<|endoftext|>\n<|assistant
-        "
+        "<|system|>{system_prompt}<|endoftext|>\n<|user|>\n{context}\n\n{current}<|endoftext|>\n<|assistant|>".replace("{system_prompt}", SYSTEM_PROMPT),  # input_template
+        "\n",  # decoder_input_output_separator
+        "<|system|>{system_prompt}<|endoftext|>\n<|user|>\n{current}<|endoftext|>\n<|assistant|>".replace("{system_prompt}", SYSTEM_PROMPT),  # input_current_text_template
         ["<|im_start|>", "<|im_end|>", "<|endoftext|>"],  # special_tokens_to_keep
         '{\n\t"max_new_tokens": 50\n}',  # generation_kwargs
     )
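Note: the added "\n" entry keeps the Zephyr preset tuple aligned with the extended outputs list in app.py, so each returned value lands in the matching Gradio component. A minimal sketch of that wiring, assuming a standard Gradio click handler (the zephyr_preset button name is hypothetical; the component names mirror the outputs list above):

    # Hypothetical hookup: each value returned by set_zephyr_preset fills the
    # component at the same position in the outputs list.
    zephyr_preset.click(
        set_zephyr_preset,
        outputs=[
            model_name_or_path,
            input_template,
            decoder_input_output_separator,  # new output, filled by the added "\n" entry
            contextless_input_template,
            special_tokens_to_keep,
            generation_kwargs,
        ],
    )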