Update README.md
README.md CHANGED
@@ -1,6 +1,14 @@
 ---
 library_name: transformers
-license:
+license: cc-by-nc-4.0
+datasets:
+- kyujinpy/KOR-OpenOrca-Platypus-v3
+language:
+- ko
+- en
+tags:
+- Economic
+- Finance
 ---
 
 
@@ -37,42 +45,39 @@ If you wish to use the original data rather than our training data, please conta
 ## Conducting Conversation
 
 import re
+
 def gen(x):
     inputs = tokenizer(f"### ์ง๋ฌธ: {x}\n\n### ๋ต๋ณ:", return_tensors='pt', return_token_type_ids=False)
 
-    #
+    # Move the inputs to the GPU when one is available
     inputs = {k: v.to(device="cuda" if torch.cuda.is_available() else "cpu") for k, v in inputs.items()}
 
     gened = model.generate(
         **inputs,
-        max_new_tokens=256,
+        max_new_tokens=256,  # maximum number of newly generated tokens
         early_stopping=True,
-        num_return_sequences=
-        do_sample=True,
-        eos_token_id=tokenizer.eos_token_id,
-        temperature=0.9,
-        top_p=0.8,
-        top_k=50
+        num_return_sequences=1,  # generate a single answer
+        do_sample=True,  # enable sampling so answers vary
+        eos_token_id=tokenizer.eos_token_id,  # stop generation at the EOS token
+        temperature=0.9,  # temperature setting that controls generation diversity
+        top_p=0.8,  # p value used for nucleus sampling
+        top_k=50  # k value used for top-k sampling
     )
 
-
-
-
-
-
-
-
-
-
-
-
-        complete_answer = temp_answer[:second_answer_start_idx].strip()
-    else:
-        complete_answer = temp_answer  # if there is no second "### ๋ต๋ณ:", return the whole answer
-
-    complete_answers.append(complete_answer)
+    # Decode the generated sequence into output text
+    decoded = tokenizer.decode(gened[0], skip_special_tokens=True).strip()
+
+    # Extract only the text that follows the "### ๋ต๋ณ:" marker
+    answer_start_idx = decoded.find("### ๋ต๋ณ:") + len("### ๋ต๋ณ:")
+    complete_answer = decoded[answer_start_idx:].strip()
+
+    # Truncate after the last sentence-ending punctuation (. ? !) so the answer never ends mid-sentence
+    match = re.search(r"[\.\?\!][^\.\?\!]*$", complete_answer)
+    if match:
+        complete_answer = complete_answer[:match.start() + 1].strip()
 
-    return
+    return complete_answer
+
@@ -133,4 +138,4 @@ If you wish to use the original data rather than our training data, please conta
 
 ## Citation [optional]
 
-<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
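The updated metadata block points the card at its training data. As a quick sanity check that the `datasets:` entry resolves, here is a minimal sketch that loads it with the `datasets` library; the `train` split name is an assumption, since the card does not state one.

```python
from datasets import load_dataset

# Dataset ID taken from the card's metadata; the split name is an assumption.
ds = load_dataset("kyujinpy/KOR-OpenOrca-Platypus-v3", split="train")
print(ds)
```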
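The `gen` function in the diff assumes that `torch`, `tokenizer`, and `model` are already in scope. A minimal setup sketch under that assumption; the repository ID below is a placeholder, since this diff does not show the model's own ID.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-org/your-model"  # placeholder: substitute this model's actual repo ID

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Mirror the device handling inside gen(): GPU when available, otherwise CPU.
model.to("cuda" if torch.cuda.is_available() else "cpu")
model.eval()
```

With that in place, the function can be called directly, e.g. `print(gen("๊ธ๋ฆฌ๊ฐ ์ค๋ฅด๋ฉด ์ฃผ์์์ฅ์ ์ด๋ค ์ํฅ์ ์ฃผ๋์?"))` ("What effect do rising interest rates have on the stock market?"), an illustrative question matching the card's Economic/Finance tags.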
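The new post-processing can be sanity-checked without loading the model: it slices off everything up to the "### ๋ต๋ณ:" marker, then drops any trailing fragment after the last sentence-ending punctuation mark. A self-contained sketch with a made-up decoder output:

```python
import re

# Made-up decoder output whose final sentence was cut off by max_new_tokens.
decoded = "### ์ง๋ฌธ: ...\n\n### ๋ต๋ณ: Rate hikes raise discount rates. As a result equity prices"

# Keep only the text after the answer marker.
answer_start_idx = decoded.find("### ๋ต๋ณ:") + len("### ๋ต๋ณ:")
complete_answer = decoded[answer_start_idx:].strip()

# Same truncation rule as gen(): cut after the last ". ? !".
match = re.search(r"[\.\?\!][^\.\?\!]*$", complete_answer)
if match:
    complete_answer = complete_answer[:match.start() + 1].strip()

print(complete_answer)  # -> Rate hikes raise discount rates.
```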