---
license: apache-2.0
---

This is a full fine-tuned version of llama-3-Korean-Bllossom-8B, trained on KorQuad Chat data.
**Python Code**

```python
import transformers
import torch

# Foundation model ID used in this example; swap in this repository's
# model ID to run the fine-tuned checkpoint instead.
model_id = "MLP-KTLim/llama-3-Korean-Bllossom-8B"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

pipeline.model.eval()

PROMPT = '''You are a helpful AI assistant. Please answer the user's questions kindly. 당신은 유능한 AI 어시스턴트 입니다. 사용자의 질문에 대해 친절하게 답변해주세요.'''
instruction = "서울의 유명한 관광 코스를 만들어줄래?"

messages = [
    {"role": "system", "content": f"{PROMPT}"},
    {"role": "user", "content": f"{instruction}"}
]

# Render the chat messages into a single prompt string using the
# model's chat template.
prompt = pipeline.tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)

# Stop generation at either the EOS token or Llama 3's end-of-turn token.
terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

outputs = pipeline(
    prompt,
    max_new_tokens=2048,
    eos_token_id=terminators,
    do_sample=True,
    temperature=0.6,
    top_p=0.9
)

# Print only the newly generated text, skipping the echoed prompt.
print(outputs[0]["generated_text"][len(prompt):])
```
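For finer-grained control than the pipeline API offers, the same generation can be driven through the model classes directly. A minimal sketch, assuming the same `model_id` as above (swap in this repository's ID for the fine-tuned weights) and the same sampling settings:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: same foundation model ID as in the example above.
model_id = "MLP-KTLim/llama-3-Korean-Bllossom-8B"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model.eval()

messages = [
    {"role": "system", "content": "You are a helpful AI assistant. Please answer the user's questions kindly."},
    {"role": "user", "content": "서울의 유명한 관광 코스를 만들어줄래?"},
]

# Tokenize the chat template directly instead of rendering it to a string first.
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(
    input_ids,
    max_new_tokens=2048,
    eos_token_id=[tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")],
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)

# Decode only the tokens generated after the prompt.
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```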