Language model pretraining has led to significant performance gains but
careful comparison between different approaches is challenging. Training is
computationally expensive, often done on private datasets of different sizes,
and, as we will show, hyperparameter choices have significant impact on the
final results. We present a replication study of BERT pretraining (Devlin et
al., 2019) that carefully measures the impact of many key hyperparameters and
training data size. We find that BERT was significantly undertrained, and can
match or exceed the performance of every model published after it. Our best
model achieves state-of-the-art results on GLUE, RACE and SQuAD. These results
highlight the importance of previously overlooked design choices, and raise
questions about the source of recently reported improvements. We release our
models and code.
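
The abstract notes that the models and code are released. As a minimal, illustrative sketch only (it assumes the Hugging Face transformers port of the weights and the "roberta-base" checkpoint name, which are not part of this record; the authors' official release lives in their own codebase), the pretrained encoder can be loaded and run like this:

# Minimal sketch, assuming the Hugging Face `transformers` port of the
# released RoBERTa weights and the "roberta-base" checkpoint name.
import torch
from transformers import RobertaTokenizer, RobertaModel

tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
model = RobertaModel.from_pretrained("roberta-base")
model.eval()

# Encode a sentence and inspect the contextual representations.
inputs = tokenizer("RoBERTa is a replication study of BERT pretraining.",
                   return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)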
@misc{liu2019roberta,
author = {Liu, Yinhan and Ott, Myle and Goyal, Naman and Du, Jingfei and Joshi, Mandar and Chen, Danqi and Levy, Omer and Lewis, Mike and Zettlemoyer, Luke and Stoyanov, Veselin},
note = {arXiv:1907.11692},
title = {RoBERTa: A Robustly Optimized BERT Pretraining Approach},
url = {http://arxiv.org/abs/1907.11692},
year = 2019
}