Update README.md
Browse files
README.md
CHANGED
@@ -20,13 +20,13 @@ OUTPUT = 각 label에 맞는 뉴스 기사 제목을 생성합니다.
|
|
20 |
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
21 |
|
22 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
23 |
-
model_dir = "kfkas/t5-
|
24 |
tokenizer = AutoTokenizer.from_pretrained(model_dir)
|
25 |
model = AutoModelForSeq2SeqLM.from_pretrained(model_dir)
|
26 |
model.to(device)
|
27 |
|
28 |
label_list = ['IT과학','경제','사회','생활문화','세계','스포츠','정치']
|
29 |
-
text = "
|
30 |
|
31 |
inputs = tokenizer.encode(text, max_length=256, truncation=True, return_tensors="pt")
|
32 |
with torch.no_grad():
|
@@ -38,10 +38,11 @@ with torch.no_grad():
|
|
38 |
top_p=0.95, # 누적 확률이 95%인 후보집합에서만 생성
|
39 |
)
|
40 |
decoded_output = tokenizer.decode(output, skip_special_tokens=True)[0]
|
41 |
-
print(predicted_title)
|
42 |
```
|
43 |
|
44 |
|
|
|
45 |
## Intended uses & limitations
|
46 |
|
47 |
More information needed
|
|
|
20 |
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
21 |
|
22 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
23 |
+
model_dir = "kfkas/t5-large-korean-news-title-klue-ynat"
|
24 |
tokenizer = AutoTokenizer.from_pretrained(model_dir)
|
25 |
model = AutoModelForSeq2SeqLM.from_pretrained(model_dir)
|
26 |
model.to(device)
|
27 |
|
28 |
label_list = ['IT과학','경제','사회','생활문화','세계','스포츠','정치']
|
29 |
+
text = "경제"
|
30 |
|
31 |
inputs = tokenizer.encode(text, max_length=256, truncation=True, return_tensors="pt")
|
32 |
with torch.no_grad():
|
|
|
38 |
top_p=0.95, # 누적 확률이 95%인 후보집합에서만 생성
|
39 |
)
|
40 |
decoded_output = tokenizer.decode(output, skip_special_tokens=True)[0]
|
41 |
+
print(predicted_title)#정부 기업 고용창출·성장 촉진 위한 경제정책 행보 주목
|
42 |
```
|
43 |
|
44 |
|
45 |
+
|
46 |
## Intended uses & limitations
|
47 |
|
48 |
More information needed
|