GuysRGithub committed • Commit 2e81b54 • 1 Parent(s): 987e054
Update
app.py CHANGED
@@ -45,7 +45,7 @@ def generate_summary(question, model):
     input_ids = inputs.input_ids.to(model.device)
     attention_mask = inputs.attention_mask.to(model.device)
     outputs = model.generate(
-        input_ids, attention_mask=attention_mask, max_new_tokens=
+        input_ids, attention_mask=attention_mask, max_new_tokens=4096, do_sample=True, num_beams=4, top_k=50, early_stopping=True, no_repeat_ngram_size=2, )
     output_str = tokenizer.batch_decode(outputs, skip_special_tokens=True)
     return outputs, output_str
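For context, the changed call maps onto the Hugging Face transformers generate() API, where max_new_tokens, do_sample, num_beams, top_k, early_stopping, and no_repeat_ngram_size are all standard keyword arguments. Below is a minimal sketch of how a generate_summary-style helper might look with the committed settings; the checkpoint name and the tokenizer call are assumptions for illustration, and only the generate() keyword arguments mirror the new line 48.

# Minimal sketch of a generate_summary-style helper using the kwargs added in this commit.
# The checkpoint name and the tokenizer call below are assumptions, not from the diff.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")      # assumed checkpoint
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")  # assumed checkpoint

def generate_summary(question, model):
    inputs = tokenizer(question, return_tensors="pt", truncation=True)  # assumed prompt handling
    input_ids = inputs.input_ids.to(model.device)
    attention_mask = inputs.attention_mask.to(model.device)
    outputs = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_new_tokens=4096,
        do_sample=True,           # sampling combined with beams selects beam-sample decoding
        num_beams=4,
        top_k=50,                 # restrict each sampling step to the 50 most likely tokens
        early_stopping=True,      # stop once num_beams finished candidates exist
        no_repeat_ngram_size=2,   # block repeated bigrams in the generated text
    )
    output_str = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    return outputs, output_str

With these settings the decoding mode is beam-search multinomial sampling: four beams are kept, each sampling step is limited to the top 50 tokens, and any bigram that already appeared in the output is disallowed.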
@@ -144,14 +144,14 @@ def chat():
 def post_process(output):
 
     output = textwrap.fill(textwrap.dedent(output).strip(), width=120)
-    lines = output.split("
+    lines = output.split("\.")
     for line in lines:
         for word in word_remove_sentence:
             if word in line.lower():
                 lines.remove(line)
                 break
 
-    output = "
+    output = "\.".join(lines)
     for item in map_words.keys():
         output = re.sub(item, map_words[item], output, re.I)
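The new lines split the wrapped text on the literal two-character separator "\." (str.split takes a plain substring, not a regex), drop any segment containing a word from word_remove_sentence, re-join on the same separator, and then rewrite terms from map_words with re.sub. Below is a sketch of that post-processing idea; the contents of word_remove_sentence and map_words and the final return are assumptions. The sketch also deviates from the committed lines in two places, named here plainly: it builds a filtered list instead of calling lines.remove() while iterating (removing during iteration skips elements), and it passes re.I via flags= (the fourth positional argument of re.sub is count, not flags).

# Sketch of the post-processing idea in this hunk, with assumed word lists and an
# assumed return statement. Deliberate deviations from the committed code:
# segments are filtered into a new list rather than removed while iterating, and
# re.I is passed as flags= because re.sub's fourth positional argument is count.
import re
import textwrap

word_remove_sentence = ["sorry", "cannot answer"]   # assumed contents
map_words = {"utilise": "use"}                      # assumed contents

def post_process(output):
    output = textwrap.fill(textwrap.dedent(output).strip(), width=120)
    # "\." is a literal backslash-dot separator for str.split, not a regex;
    # written with an escaped backslash here to avoid the invalid-escape warning.
    sep = "\\."
    lines = output.split(sep)
    lines = [line for line in lines
             if not any(word in line.lower() for word in word_remove_sentence)]
    output = sep.join(lines)
    for pattern, replacement in map_words.items():
        output = re.sub(pattern, replacement, output, flags=re.I)
    return output                                    # assumed; the hunk ends before any return

Because str.split treats the separator literally, the split only has an effect if the model output actually contains the characters backslash and dot; otherwise the whole text remains a single segment and the word filter either keeps or drops it wholesale.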