File size: 851 Bytes
6f95380
 
46772b0
6f95380
 
46772b0
6f95380
 
 
46772b0
6f95380
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
"""Minimal single-turn chat example for EEVE-Korean-Instruct-10.8B.

Builds a Human/Assistant prompt around a Korean multiple-choice question,
runs greedy generation (up to 256 new tokens), and prints the decoded text.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "yanolja/EEVE-Korean-Instruct-10.8B-v1.0"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

# System preamble plus one Human turn, in the format the instruct model expects.
prompt_template = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\nHuman: {prompt}\nAssistant:\n"
text = 'ํ•œ๊ตญ์˜ ์ˆ˜๋„๋Š” ์–ด๋””์ธ๊ฐ€์š”? ์•„๋ž˜ ์„ ํƒ์ง€ ์ค‘ ๊ณจ๋ผ์ฃผ์„ธ์š”.\n\n(A) ๊ฒฝ์„ฑ\n(B) ๋ถ€์‚ฐ\n(C) ํ‰์–‘\n(D) ์„œ์šธ\n(E) ์ „์ฃผ'

encoded = tokenizer(prompt_template.format(prompt=text), return_tensors='pt')

# Deterministic (greedy) decoding; cap the reply length at 256 new tokens.
generated = model.generate(**encoded, max_new_tokens=256)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])