We investigate the optimal model size and number of tokens for training a
transformer language model under a given compute budget. We find that current
large language models are significantly undertrained, a consequence of the
recent focus on scaling language models whilst keeping the amount of training
data constant. By training over 400 language models ranging from 70 million to
over 16 billion parameters on 5 to 500 billion tokens, we find that for
compute-optimal training, the model size and the number of training tokens
should be scaled equally: for every doubling of model size the number of
training tokens should also be doubled. We test this hypothesis by training a
predicted compute-optimal model, Chinchilla, that uses the same compute budget
as Gopher but with 70B parameters and 4$\times$ more data. Chinchilla
uniformly and significantly outperforms Gopher (280B), GPT-3 (175B), Jurassic-1
(178B), and Megatron-Turing NLG (530B) on a large range of downstream
evaluation tasks. This also means that Chinchilla uses substantially less
compute for fine-tuning and inference, greatly facilitating downstream usage.
As a highlight, Chinchilla reaches a state-of-the-art average accuracy of 67.5%
on the MMLU benchmark, greater than a 7% improvement over Gopher.
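
To make the abstract's scaling rule concrete, here is a minimal Python sketch, not taken from the paper itself: it assumes the common approximation of roughly 6 * N * D training FLOPs for a model with N parameters trained on D tokens, and an illustrative tokens-per-parameter ratio of 20 (both are assumptions, not figures stated in the abstract). Under the "scale model size and tokens equally" rule, N and D each grow as the square root of the compute budget, so a 4x larger budget doubles both.

def compute_optimal_split(flop_budget, tokens_per_param=20.0):
    """Split a FLOP budget into (parameters, tokens).

    Assumes C ~= 6 * N * D and a fixed, illustrative D/N ratio, so that
    N and D both scale as sqrt(C): quadrupling compute doubles each.
    """
    n_params = (flop_budget / (6.0 * tokens_per_param)) ** 0.5
    n_tokens = tokens_per_param * n_params
    return n_params, n_tokens

if __name__ == "__main__":
    # Quadrupling the budget at each step doubles both parameters and tokens.
    for budget in (1e21, 4e21, 1.6e22):
        n, d = compute_optimal_split(budget)
        print(f"C={budget:.1e} FLOPs -> N={n/1e9:.2f}B params, D={d/1e9:.0f}B tokens")

Running the sketch prints, for example, roughly 2.9B parameters and 58B tokens at 1e21 FLOPs and roughly 5.8B parameters and 115B tokens at 4e21 FLOPs, illustrating the equal-scaling behaviour described above.
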
@misc{hoffmann2022training,
author = {Hoffmann, Jordan and Borgeaud, Sebastian and Mensch, Arthur and Buchatskaya, Elena and Cai, Trevor and Rutherford, Eliza and Casas, Diego de Las and Hendricks, Lisa Anne and Welbl, Johannes and Clark, Aidan and Hennigan, Tom and Noland, Eric and Millican, Katie and Driessche, George van den and Damoc, Bogdan and Guy, Aurelia and Osindero, Simon and Simonyan, Karen and Elsen, Erich and Rae, Jack W. and Vinyals, Oriol and Sifre, Laurent},
keywords = {optimization},
note = {arXiv:2203.15556},
title = {Training Compute-Optimal Large Language Models},
url = {http://arxiv.org/abs/2203.15556},
year = 2022
}