MEGATRON-CNTRL: Controllable Story Generation with External Knowledge Using Large-Scale Language Models

Existing pre-trained large language models have shown unparalleled generative
capabilities. However, they are not controllable. In this paper, we propose
MEGATRON-CNTRL, a novel framework that uses large-scale language models and
adds control to text generation by incorporating an external knowledge base.
Our framework consists of a keyword predictor, a knowledge retriever, a
contextual knowledge ranker, and a conditional text generator. As we do not
have access to ground-truth supervision for the knowledge ranker, we make use
of weak supervision from sentence embeddings. The empirical results show that
our model generates more fluent, consistent, and coherent stories with less
repetition and higher diversity compared to prior work on the ROC story
dataset. We showcase the controllability of our model by replacing the keywords
used to generate stories and re-running the generation process. Human
evaluation results show that 77.5% of these stories are successfully controlled
by the new keywords. Furthermore, by scaling our model from 124 million to 8.3
billion parameters, we demonstrate that larger models improve both the quality
of generation (from 74.5% to 93.0% for consistency) and controllability (from
77.5% to 91.5%).
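The framework outlined above chains four components in a loop. The sketch below is a minimal, hypothetical rendering of that loop under assumed interfaces; none of the names (generate_story, predictor.predict, ranker.top_k, generator.next_sentence) come from the paper or its released code.

# Minimal sketch of the four-stage loop described in the abstract. All class
# and method names are illustrative assumptions, not the authors'
# implementation.

def generate_story(context, n_sentences, predictor, retriever, ranker,
                   generator, forced_keywords=None):
    """Generate a story one sentence at a time.

    forced_keywords overrides the keyword predictor, mimicking the
    keyword-swap experiment used to measure controllability.
    """
    story = list(context)
    for _ in range(n_sentences):
        # 1. Keyword predictor: propose keywords for the next sentence.
        keywords = forced_keywords or predictor.predict(story)
        # 2. Knowledge retriever: fetch candidate knowledge sentences for
        #    those keywords from an external knowledge base.
        candidates = retriever.retrieve(keywords)
        # 3. Contextual knowledge ranker: keep only the candidates most
        #    relevant to the story so far.
        knowledge = ranker.top_k(story, candidates, k=2)
        # 4. Conditional text generator: condition the language model on the
        #    story so far plus the selected knowledge.
        story.append(generator.next_sentence(story, knowledge))
    return story

Re-running generate_story on the same context with a different forced_keywords is the keyword-replacement probe that the human evaluation above scores (77.5% of regenerated stories follow the new keywords).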
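Because there are no ground-truth labels for which retrieved knowledge is relevant, the ranker is trained with weak supervision from sentence embeddings. One plausible reading of that, sketched below purely as an assumption, is to pseudo-label as positive the candidates whose embeddings are most similar to the embedding of the story's actual next sentence.

# Hypothetical weak-labeling step for the knowledge ranker; the abstract only
# says the supervision comes from sentence embeddings, so the top-k cosine
# rule below is an assumption.
import numpy as np

def cosine(u, v):
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v) + 1e-8))

def weak_labels(candidate_embs, next_sentence_emb, top_k=1):
    """Return 0/1 pseudo-labels: the top_k candidates closest to the
    ground-truth next sentence are treated as positives for the ranker."""
    sims = np.array([cosine(c, next_sentence_emb) for c in candidate_embs])
    labels = np.zeros(len(candidate_embs), dtype=int)
    labels[np.argsort(sims)[::-1][:top_k]] = 1
    return labels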
@misc{xu2020megatroncntrl,
author = {Xu, Peng and Patwary, Mostofa and Shoeybi, Mohammad and Puri, Raul and Fung, Pascale and Anandkumar, Anima and Catanzaro, Bryan},
biburl = {https://www.bibsonomy.org/bibtex/2e9ee77ab39938bdbcbc8209ebe0afc73/albinzehe},
keywords = {cntrl emnlp2020 generation textgeneration},
  note = {cite arXiv:2010.00840. Comment: Accepted in EMNLP 2020 main conference},
  title = {MEGATRON-CNTRL: Controllable Story Generation with External Knowledge Using Large-Scale Language Models},
url = {http://arxiv.org/abs/2010.00840},
year = 2020
}