Tags: Text Generation · Transformers · Safetensors · Japanese · English · mistral · conversational · text-generation-inference · Inference Endpoints
leonardlin committed · Commit 3755032 · 1 Parent(s): b01aa78

Update README.md
Files changed (1): README.md +8 -0
README.md CHANGED
@@ -3,6 +3,10 @@ license: apache-2.0
 language:
 - ja
 - en
+datasets:
+- augmxnt/ultra-orca-boros-en-ja-v1
+- Open-Orca/SlimOrca
+- augmxnt/shisa-en-ja-dpo-v1
 ---
 # Shisa 7B

@@ -110,6 +114,8 @@ streamer = TextStreamer(tokenizer, skip_prompt=True)
 # The prompt template is included in the model's tokenizer_config.json so you shouldn't need this but we've included this for convenience
 # tokenizer.chat_template = "{%- for idx in range(0, messages|length) -%}\n{%- if messages[idx]['role'] == 'user' -%}\n{%- if idx > 1 -%}\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\n{%- else -%}\n{{- messages[idx]['content'] + ' [/INST]' -}}\n{%- endif -%}\n{% elif messages[idx]['role'] == 'system' %}\n{{- bos_token + '[INST] <<SYS>>\\n' + messages[idx]['content'] + '\\n<</SYS>>\\n\\n' -}}\n{%- elif messages[idx]['role'] == 'assistant' -%}\n{{- ' ' + messages[idx]['content'] + ' ' + eos_token -}}\n{% endif %}\n{% endfor %}\n"

+# A more typical prompt: あなたは役に立つアシスタントです。("You are a helpful assistant.")
+
 # You are an avid Pokemon fanatic.
 prompt = "あなたは熱狂的なポケモンファンです。"
 chat = [{"role": "system", "content": prompt}]

@@ -293,6 +299,8 @@ streamer = TextStreamer(tokenizer, skip_prompt=True)
 # プロンプトテンプレートはモデルのtokenizer_config.jsonに含まれているので、これは必要ないはずですが、便宜上こちらにも掲載しています
 # tokenizer.chat_template = "{%- for idx in range(0, messages|length) -%}\n{%- if messages[idx]['role'] == 'user' -%}\n{%- if idx > 1 -%}\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\n{%- else -%}\n{{- messages[idx]['content'] + ' [/INST]' -}}\n{%- endif -%}\n{% elif messages[idx]['role'] == 'system' %}\n{{- bos_token + '[INST] <<SYS>>\\n' + messages[idx]['content'] + '\\n<</SYS>>\\n\\n' -}}\n{%- elif messages[idx]['role'] == 'assistant' -%}\n{{- ' ' + messages[idx]['content'] + ' ' + eos_token -}}\n{% endif %}\n{% endfor %}\n"

+# より典型的なプロンプト: あなたは役に立つアシスタントです。
+
 # You are an avid Pokemon fanatic.
 prompt = "あなたは熱狂的なポケモンファンです。"
 chat = [{"role": "system", "content": prompt}]
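The first hunk adds the three training datasets to the card's YAML metadata, which the Hub surfaces as dataset links and exposes programmatically. A minimal sketch of reading that metadata back, assuming the huggingface_hub client and the augmxnt/shisa-7b-v1 repo id (the repo id does not appear in this diff):

```python
from huggingface_hub import ModelCard

# Load the model card and inspect the YAML front matter edited above.
card = ModelCard.load("augmxnt/shisa-7b-v1")  # assumed repo id
print(card.data.datasets)
# Expected after this commit:
# ['augmxnt/ultra-orca-boros-en-ja-v1', 'Open-Orca/SlimOrca', 'augmxnt/shisa-en-ja-dpo-v1']
```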
 
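The remaining two hunks insert the same note into the README's English and Japanese generation examples (the third hunk mirrors the second in the card's Japanese section: "A more typical prompt: you are a helpful assistant."). For context, here is a minimal, self-contained sketch of how those lines are used, assuming the augmxnt/shisa-7b-v1 repo id and an illustrative user turn, neither of which appears in this diff; loading details may differ from the card's full example:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

model_id = "augmxnt/shisa-7b-v1"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)
streamer = TextStreamer(tokenizer, skip_prompt=True)

# The chat template ships in tokenizer_config.json, so the commented-out
# tokenizer.chat_template assignment shown in the diff is not needed.
# A more typical prompt: あなたは役に立つアシスタントです。("You are a helpful assistant.")
prompt = "あなたは熱狂的なポケモンファンです。"  # "You are an avid Pokemon fanatic."
chat = [{"role": "system", "content": prompt}]
# Illustrative user turn (not part of the diff): "What's your favorite Pokemon?"
chat.append({"role": "user", "content": "一番好きなポケモンは何ですか？"})

# With the template shown above, this renders roughly:
# <s>[INST] <<SYS>>\n{system}\n<</SYS>>\n\n{user} [/INST]
input_ids = tokenizer.apply_chat_template(chat, return_tensors="pt")
output = model.generate(
    input_ids.to(model.device),
    max_new_tokens=200,
    streamer=streamer,
)
```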