We study empirical scaling laws for language model performance on the cross-entropy loss. The loss scales as a power-law with model size, dataset size, and the amount of compute used for training, with some trends spanning more than seven orders of magnitude. Other architectural details such as network width or depth have minimal effects within a wide range. Simple equations govern the dependence of overfitting on model/dataset size and the dependence of training speed on model size. These relationships allow us to determine the optimal allocation of a fixed compute budget. Larger models are significantly more sample-efficient, such that optimally compute-efficient training involves training very large models on a relatively modest amount of data and stopping significantly before convergence.
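For reference, a minimal sketch of the power-law forms the abstract describes, in the paper's notation (N = non-embedding parameters, D = dataset size in tokens, C = training compute; exponent values are approximate as reported in the paper):

L(N) = (N_c / N)^{\alpha_N}, \quad \alpha_N \approx 0.076
L(D) = (D_c / D)^{\alpha_D}, \quad \alpha_D \approx 0.095
L(C_{\mathrm{min}}) = (C_c^{\mathrm{min}} / C_{\mathrm{min}})^{\alpha_C^{\mathrm{min}}}, \quad \alpha_C^{\mathrm{min}} \approx 0.050

and, combining model size and dataset size to capture overfitting,

L(N, D) = \left[ \left( \frac{N_c}{N} \right)^{\alpha_N / \alpha_D} + \frac{D_c}{D} \right]^{\alpha_D}.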
Description
Transformers start to scale at 1 billion tokens, 100 million parameters
@article{journals/corr/abs-2001-08361,
author = {Kaplan, Jared and McCandlish, Sam and Henighan, Tom and Brown, Tom B. and Chess, Benjamin and Child, Rewon and Gray, Scott and Radford, Alec and Wu, Jeffrey and Amodei, Dario},
biburl = {https://www.bibsonomy.org/bibtex/2ee21e97f93caaf7b9c9d30cbafadc84c/ghagerer},
description = {Transformers start to scale at 1 billion tokens, 100 million parameters},
ee = {https://arxiv.org/abs/2001.08361},
journal = {CoRR},
keywords = {llms lstm transformer},
title = {Scaling Laws for Neural Language Models.},
url = {https://arxiv.org/pdf/2001.08361.pdf},
volume = {abs/2001.08361},
year = 2020
}