On October 14th, 2020, researchers from OpenAI, the Stanford Institute for Human-Centered Artificial Intelligence, and other universities convened to discuss open research questions surrounding GPT-3, the largest publicly-disclosed dense language model at the time. The meeting took place under Chatham House Rules. Discussants came from a variety of research backgrounds including computer science, linguistics, philosophy, political science, communications, cyber policy, and more. Broadly, the discussion centered around two main questions: 1) What are the technical capabilities and limitations of large language models? 2) What are the societal effects of widespread use of large language models? Here, we provide a detailed summary of the discussion organized by the two themes above.
%0 Journal Article
%1 tamkin_understanding_2021
%A Tamkin, Alex
%A Brundage, Miles
%A Clark, Jack
%A Ganguli, Deep
%D 2021
%J arXiv:2102.02503 [cs]
%K kuenstliche_intelligenz texttechnologie
%T Understanding the capabilities, limitations, and societal impact of large language models
%U http://arxiv.org/abs/2102.02503
%X On October 14th, 2020, researchers from OpenAI, the Stanford Institute for Human-Centered Artificial Intelligence, and other universities convened to discuss open research questions surrounding GPT-3, the largest publicly-disclosed dense language model at the time. The meeting took place under Chatham House Rules. Discussants came from a variety of research backgrounds including computer science, linguistics, philosophy, political science, communications, cyber policy, and more. Broadly, the discussion centered around two main questions: 1) What are the technical capabilities and limitations of large language models? 2) What are the societal effects of widespread use of large language models? Here, we provide a detailed summary of the discussion organized by the two themes above.
@comment{ arXiv preprint: identified via eprint/archiveprefix/primaryclass
  instead of a pseudo-journal name ("arXiv:2102.02503 [cs]" is not a journal),
  and typed @misc accordingly. primaryclass cs.CL is taken from the arXiv
  record for 2102.02503 -- SOURCE itself only shows "[cs]"; confirm if in
  doubt. The "arXiv: 2102.02503" note is kept so classic BibTeX styles that
  ignore eprint fields still print the identifier. BibSonomy bookkeeping
  fields (added-at, biburl, interhash, intrahash, timestamp) are ignored by
  all styles and preserved verbatim. }
@misc{tamkin_understanding_2021,
  abstract      = {On October 14th, 2020, researchers from OpenAI, the Stanford Institute for Human-Centered Artificial Intelligence, and other universities convened to discuss open research questions surrounding GPT-3, the largest publicly-disclosed dense language model at the time. The meeting took place under Chatham House Rules. Discussants came from a variety of research backgrounds including computer science, linguistics, philosophy, political science, communications, cyber policy, and more. Broadly, the discussion centered around two main questions: 1) What are the technical capabilities and limitations of large language models? 2) What are the societal effects of widespread use of large language models? Here, we provide a detailed summary of the discussion organized by the two themes above.},
  added-at      = {2021-11-29T12:47:08.000+0100},
  archiveprefix = {arXiv},
  author        = {Tamkin, Alex and Brundage, Miles and Clark, Jack and Ganguli, Deep},
  biburl        = {https://www.bibsonomy.org/bibtex/2c1b21ebe8b2d2a0cb558d0b8f39fce76/lepsky},
  eprint        = {2102.02503},
  interhash     = {01313bac1dd0abf1c1746aa42ada2a11},
  intrahash     = {c1b21ebe8b2d2a0cb558d0b8f39fce76},
  keywords      = {kuenstliche_intelligenz texttechnologie},
  language      = {en},
  month         = feb,
  note          = {arXiv: 2102.02503},
  primaryclass  = {cs.CL},
  timestamp     = {2021-11-29T12:50:05.000+0100},
  title         = {Understanding the Capabilities, Limitations, and Societal Impact of Large Language Models},
  url           = {http://arxiv.org/abs/2102.02503},
  urldate       = {2021-11-20},
  year          = {2021},
}