Recent work pre-training Transformers with self-supervised objectives on
large text corpora has shown great success when fine-tuned on downstream NLP
tasks including text summarization. However, pre-training objectives tailored
for abstractive text summarization have not been explored. Furthermore, there is
a lack of systematic evaluation across diverse domains. In this work, we
propose pre-training large Transformer-based encoder-decoder models on massive
text corpora with a new self-supervised objective. In PEGASUS, important
sentences are removed/masked from an input document and are generated together
as one output sequence from the remaining sentences, similar to an extractive
summary. We evaluated our best PEGASUS model on 12 downstream summarization
tasks spanning news, science, stories, instructions, emails, patents, and
legislative bills. Experiments demonstrate it achieves state-of-the-art
performance on all 12 downstream datasets measured by ROUGE scores. Our model
also shows surprising performance on low-resource summarization, surpassing
previous state-of-the-art results on 6 datasets with only 1000 examples.
Finally, we validated our results using human evaluation and showed that our model
summaries achieve human performance on multiple datasets.
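
The gap-sentence generation (GSG) objective described above lends itself to a short illustration: sentences that score highest against the rest of the document (for example, by ROUGE1 F1) are masked in the input, and their concatenation becomes the generation target. The sketch below is a minimal reconstruction of that example-construction step, assuming the rouge_score package and a [MASK1] sentinel token; it is an illustrative approximation, not the authors' implementation.

# Minimal, illustrative sketch of Gap Sentence Generation (GSG) example
# construction: score each sentence by ROUGE1 overlap with the rest of the
# document, mask the top-scoring ("gap") sentences in the input, and use
# their concatenation as the target. The [MASK1] token and the selection
# details here are assumptions, not the paper's exact procedure.

from rouge_score import rouge_scorer

MASK_TOKEN = "[MASK1]"  # sentinel for masked gap sentences (assumed name)
scorer = rouge_scorer.RougeScorer(["rouge1"], use_stemmer=True)

def make_gsg_example(sentences, gap_ratio=0.3):
    """Build one (input, target) pre-training pair from a list of sentences."""
    n_gaps = max(1, int(len(sentences) * gap_ratio))

    # Score each sentence against the remainder of the document.
    scores = []
    for i, sent in enumerate(sentences):
        rest = " ".join(s for j, s in enumerate(sentences) if j != i)
        scores.append((scorer.score(rest, sent)["rouge1"].fmeasure, i))

    gap_idx = {i for _, i in sorted(scores, reverse=True)[:n_gaps]}

    # Input: the document with gap sentences replaced by the mask token.
    model_input = " ".join(
        MASK_TOKEN if i in gap_idx else s for i, s in enumerate(sentences)
    )
    # Target: the removed sentences, concatenated in document order.
    target = " ".join(sentences[i] for i in sorted(gap_idx))
    return model_input, target

if __name__ == "__main__":
    doc = [
        "PEGASUS is pre-trained with a summarization-like objective.",
        "Important sentences are removed from the input document.",
        "The model must generate them from the remaining sentences.",
    ]
    inp, tgt = make_gsg_example(doc)
    print("INPUT :", inp)
    print("TARGET:", tgt)

Because the target is a set of whole sentences extracted from the document, the pre-training task closely resembles abstractive summarization while requiring no labeled summaries, which is the motivation the abstract gives for the objective.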
Description
[1912.08777] PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization
@misc{zhang2019pegasus,
abstract = {Recent work pre-training Transformers with self-supervised objectives on
large text corpora has shown great success when fine-tuned on downstream NLP
tasks including text summarization. However, pre-training objectives tailored
for abstractive text summarization have not been explored. Furthermore there is
a lack of systematic evaluation across diverse domains. In this work, we
propose pre-training large Transformer-based encoder-decoder models on massive
text corpora with a new self-supervised objective. In PEGASUS, important
sentences are removed/masked from an input document and are generated together
as one output sequence from the remaining sentences, similar to an extractive
summary. We evaluated our best PEGASUS model on 12 downstream summarization
tasks spanning news, science, stories, instructions, emails, patents, and
legislative bills. Experiments demonstrate it achieves state-of-the-art
performance on all 12 downstream datasets measured by ROUGE scores. Our model
also shows surprising performance on low-resource summarization, surpassing
previous state-of-the-art results on 6 datasets with only 1000 examples.
Finally we validated our results using human evaluation and show that our model
summaries achieve human performance on multiple datasets.},
added-at = {2020-12-11T08:58:25.000+0100},
author = {Zhang, Jingqing and Zhao, Yao and Saleh, Mohammad and Liu, Peter J.},
biburl = {https://www.bibsonomy.org/bibtex/2587289c76b32a460c7d0163f08453c14/festplatte},
description = {[1912.08777] PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization},
interhash = {21386a9a2c3157b4bf4fe3f67a5b958d},
intrahash = {587289c76b32a460c7d0163f08453c14},
keywords = {language-model masterthesis},
timestamp = {2021-02-11T14:24:42.000+0100},
title = {PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization},
url = {http://arxiv.org/abs/1912.08777},
year = 2019
}