Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
clean up desc
Browse files
app.py
CHANGED
@@ -103,7 +103,9 @@ if __name__ == "__main__":
|
|
103 |
model, tokenizer = load_model_and_tokenizer("pszemraj/led-large-book-summary")
|
104 |
model_sm, tokenizer_sm = load_model_and_tokenizer("pszemraj/led-base-book-summary")
|
105 |
title = "Long-Form Summarization: LED & BookSum"
|
106 |
-
|
|
|
|
|
107 |
gr.Interface(
|
108 |
proc_submission,
|
109 |
inputs=[
|
|
|
103 |
model, tokenizer = load_model_and_tokenizer("pszemraj/led-large-book-summary")
|
104 |
model_sm, tokenizer_sm = load_model_and_tokenizer("pszemraj/led-base-book-summary")
|
105 |
title = "Long-Form Summarization: LED & BookSum"
|
106 |
+
|
107 |
+
description = "A simple demo using a fine-tuned LED model to summarize long-form text. [This model](https://huggingface.co/pszemraj/led-large-book-summary) is a fine-tuned checkpoint of [allenai/led-large-16384](https://huggingface.co/allenai/led-large-16384) on the [BookSum dataset](https://arxiv.org/abs/2105.08209). The goal was to create a model that can generalize well and is useful in summarizing lots of text in academic and daily usage. See [model card](https://huggingface.co/pszemraj/led-large-book-summary) for a notebook with GPU inference (much faster) on Colab."
|
108 |
+
|
109 |
gr.Interface(
|
110 |
proc_submission,
|
111 |
inputs=[
|