from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the instruction-tuned model and its tokenizer from the Hugging Face Hub.
model = AutoModelForCausalLM.from_pretrained("yanolja/EEVE-Korean-Instruct-10.8B-v1.0")
tokenizer = AutoTokenizer.from_pretrained("yanolja/EEVE-Korean-Instruct-10.8B-v1.0")

# The model expects its input wrapped in this Human/Assistant chat template.
prompt_template = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\nHuman: {prompt}\nAssistant:\n"

# Korean multiple-choice question: "What is the capital of Korea? Please choose
# from the options below. (A) Gyeongseong (B) Busan (C) Pyongyang (D) Seoul (E) Jeonju"
text = '한국의 수도는 어디인가요? 아래 선택지 중 골라주세요.\n\n(A) 경성\n(B) 부산\n(C) 평양\n(D) 서울\n(E) 전주'

# Fill the question into the template and tokenize it into PyTorch tensors.
model_inputs = tokenizer(prompt_template.format(prompt=text), return_tensors='pt')

# Generate up to 256 new tokens and decode the result back to text.
outputs = model.generate(**model_inputs, max_new_tokens=256)
output_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
print(output_text)
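
# Loading 10.8B parameters in full float32 precision takes roughly 43 GB of
# memory, which exceeds most single GPUs. A minimal half-precision sketch,
# assuming a CUDA-capable machine and that the `accelerate` package is
# installed (device_map="auto" depends on it):
import torch

model = AutoModelForCausalLM.from_pretrained(
    "yanolja/EEVE-Korean-Instruct-10.8B-v1.0",
    torch_dtype=torch.float16,  # halves the memory footprint vs. float32
    device_map="auto",          # let accelerate spread layers over available devices
)
model_inputs = tokenizer(prompt_template.format(prompt=text), return_tensors='pt').to(model.device)
outputs = model.generate(**model_inputs, max_new_tokens=256)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])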
|
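# Alternative prompt construction: recent transformers versions (>= 4.34) can
# build the prompt from the tokenizer's chat template. Whether this checkpoint
# ships a chat template is an assumption here; check tokenizer.chat_template
# first and fall back to the manual prompt_template above if it is None.
if tokenizer.chat_template is not None:
    messages = [{"role": "user", "content": text}]
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,  # append the assistant turn marker
        return_tensors="pt",
    )
    outputs = model.generate(input_ids, max_new_tokens=256)
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])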