Update README.md
README.md (changed):
````diff
@@ -23,7 +23,7 @@ model = BartForConditionalGeneration.from_pretrained('gogamza/kobart-summarizati
 text = "과거를 떠올려보자. 방송을 보던 우리의 모습을..."
 
 raw_input_ids = tokenizer.encode(text)
-input_ids = [tokenizer.bos_token_id] +
+input_ids = [tokenizer.bos_token_id] + \
 raw_input_ids + [tokenizer.eos_token_id]
 summary_ids = model.generate(torch.tensor([input_ids]),
                              max_length=150,
@@ -34,11 +34,7 @@ summary_ids = model.generate(torch.tensor([input_ids]),
 summ_text = tokenizer.batch_decode(summary_ids.tolist(), skip_special_tokens=True)[0]
 ```
 
-## Demo
 
-- [요약 데모](http://52.231.69.211:8081/)
-
-![](summ.png)
 
 
````
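The only code change is the trailing backslash on line 26: before this commit the statement ended in a bare `+`, so copying the snippet verbatim raised a SyntaxError, and the added `\` makes the next line a legal continuation. The second hunk drops the "Demo" section (a link labeled 요약 데모, "summarization demo", and a screenshot). For reference, below is a minimal runnable sketch of the snippet as it reads after the commit. The tokenizer setup sits outside the changed hunks, so loading it via `AutoTokenizer` is an assumption; the model ID, `generate` arguments, and decode call come from the diff context.

```python
# Sketch of the README usage example as it reads after this commit.
import torch
from transformers import AutoTokenizer, BartForConditionalGeneration

# Assumption: the tokenizer-loading line is not shown in the hunks above;
# AutoTokenizer stands in for whatever the README actually uses.
tokenizer = AutoTokenizer.from_pretrained('gogamza/kobart-summarization')
model = BartForConditionalGeneration.from_pretrained('gogamza/kobart-summarization')

text = "과거를 떠올려보자. 방송을 보던 우리의 모습을..."

raw_input_ids = tokenizer.encode(text)
# The fix in this commit: the trailing backslash lets the expression
# continue onto the next line instead of ending after the bare `+`.
input_ids = [tokenizer.bos_token_id] + \
            raw_input_ids + [tokenizer.eos_token_id]

summary_ids = model.generate(torch.tensor([input_ids]),
                             max_length=150)
summ_text = tokenizer.batch_decode(summary_ids.tolist(), skip_special_tokens=True)[0]
print(summ_text)
```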