Updated the generator to use temperature and sampling
README.md CHANGED
@@ -39,13 +39,19 @@ ARTICLE_TO_SUMMARIZE = ""
 # generate summary
 input_ids = tokenizer.encode(ARTICLE_TO_SUMMARIZE, return_tensors='pt')
 summary_ids = model.generate(input_ids,
-
-
-
-
+                             min_length=20,
+                             max_length=80,
+                             num_beams=10,
+                             repetition_penalty=2.5,
+                             length_penalty=1.0,
                              early_stopping=True,
                              no_repeat_ngram_size=2,
-                             use_cache=True
+                             use_cache=True,
+                             do_sample=True,
+                             temperature=0.8,
+                             top_k=50,
+                             top_p=0.95)
+
 summary_text = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
 print(summary_text)
 ```
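
For context, here is a minimal runnable sketch of the snippet after this change. The hunk does not show which checkpoint the README loads, so the model name below (`facebook/bart-large-cnn`) and the `Auto*` loading boilerplate are assumptions for illustration; any Hugging Face seq2seq summarization model accepts the same `generate()` parameters.

```python
# Minimal self-contained sketch of the updated README snippet.
# Assumptions (not shown in the diff hunk): the checkpoint name and the
# AutoTokenizer/AutoModelForSeq2SeqLM loading code.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "facebook/bart-large-cnn"  # assumed checkpoint for illustration
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

ARTICLE_TO_SUMMARIZE = ""  # paste the article text here, as in the README

# generate summary
input_ids = tokenizer.encode(ARTICLE_TO_SUMMARIZE, return_tensors='pt')
summary_ids = model.generate(input_ids,
                             min_length=20,           # shortest summary, in tokens
                             max_length=80,           # longest summary, in tokens
                             num_beams=10,            # beam-search width
                             repetition_penalty=2.5,  # discourage repeated tokens
                             length_penalty=1.0,      # neutral length preference
                             early_stopping=True,     # stop beams once they hit EOS
                             no_repeat_ngram_size=2,  # forbid repeated bigrams
                             use_cache=True,          # reuse past key/values
                             do_sample=True,          # sample within each beam step
                             temperature=0.8,         # <1.0 sharpens the distribution
                             top_k=50,                # keep the 50 most likely tokens
                             top_p=0.95)              # nucleus sampling cutoff
summary_text = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
print(summary_text)
```

Note that combining `do_sample=True` with `num_beams=10` puts `generate()` into beam-search multinomial sampling: `temperature`, `top_k`, and `top_p` shape which continuations each beam may draw at every step, rather than replacing beam search with pure sampling.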