---
license: apache-2.0
---

This model is a full fine-tuned version of llama-3-Korean-Bllossom-8B (used as the foundation model) on KorQuad chat data.

**Python Code**

```python
import torch
import transformers

model_id = "SEOKDONG/llama-3-Korean-Bllossom-8B-sft"

# Load the model as a bfloat16 text-generation pipeline across available devices.
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)
pipeline.model.eval()

# Bilingual system prompt; the Korean repeats the English instruction.
PROMPT = """You are a helpful AI assistant. Please answer the user's questions kindly. 당신은 유능한 AI 어시스턴트 입니다. 사용자의 질문에 대해 친절하게 답변해주세요."""
instruction = "서울의 유명한 관광 코스를 만들어줄래?"  # "Can you put together a well-known sightseeing itinerary for Seoul?"

messages = [
    {"role": "system", "content": PROMPT},
    {"role": "user", "content": instruction},
]

# Render the chat into Llama-3's prompt format, and stop generation on either
# the EOS token or Llama-3's end-of-turn token <|eot_id|>.
prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
terminators = [pipeline.tokenizer.eos_token_id, pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")]

outputs = pipeline(
    prompt, max_new_tokens=2048, eos_token_id=terminators,
    do_sample=True, temperature=0.6, top_p=0.9,
)

# Print only the newly generated answer, stripping the echoed prompt.
print(outputs[0]["generated_text"][len(prompt):])
```
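
For finer-grained control (custom sampling loops, batching, and so on), the same chat flow can be run without the pipeline wrapper by loading the tokenizer and model directly and calling `model.generate`. This is a minimal sketch using the generic `transformers` `AutoTokenizer`/`AutoModelForCausalLM` APIs rather than a snippet from the original card; it assumes the model's chat template and `<|eot_id|>` terminator behave exactly as in the pipeline example above.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "SEOKDONG/llama-3-Korean-Bllossom-8B-sft"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)
model.eval()

messages = [
    {"role": "system", "content": "You are a helpful AI assistant. Please answer the user's questions kindly."},
    {"role": "user", "content": "서울의 유명한 관광 코스를 만들어줄래?"},
]

# Tokenize the chat template straight to tensors on the model's device.
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

terminators = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")]

with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        max_new_tokens=2048,
        eos_token_id=terminators,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
    )

# Decode only the tokens generated after the prompt, mirroring the
# len(prompt) slice in the pipeline version above.
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```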