Answering questions correctly from standardized eighth-grade science tests is itself a test of machine intelligence. Determining whether a system truly displays artificial intelligence is difficult and complex, and well-known assessments like the Turing Test are not suited to the task. The Allen Institute for Artificial Intelligence suggests that answering science exam questions successfully is a better measure of machine intelligence and designed a global competition to engage the research community in this approach. The outcome of the Allen AI Science Challenge highlights the current limitations of AI research in language understanding, reasoning, and commonsense knowledge; the highest scores are still limited to the capabilities of information-retrieval methods.
%0 Journal Article
%1 SchoenickClarkEtAl17cacm
%A Schoenick, Carissa
%A Clark, Peter
%A Tafjord, Oyvind
%A Turney, Peter
%A Etzioni, Oren
%D 2017
%J Communications of the ACM
%K 01801 acm paper ai system test language processing answer
%N 9
%P 60--64
%R 10.1145/3122814
%T Moving Beyond the Turing Test with the Allen AI Science Challenge
%V 60
%X Answering questions correctly from standardized eighth-grade science tests is itself a test of machine intelligence. Determining whether a system truly displays artificial intelligence is difficult and complex, and well-known assessments like the Turing Test are not suited to the task. The Allen Institute for Artificial Intelligence suggests that answering science exam questions successfully is a better measure of machine intelligence and designed a global competition to engage the research community in this approach. The outcome of the Allen AI Science Challenge highlights the current limitations of AI research in language understanding, reasoning, and commonsense knowledge; the highest scores are still limited to the capabilities of information-retrieval methods.
@article{SchoenickClarkEtAl17cacm,
  abstract  = {Answering questions correctly from standardized eighth-grade science tests is itself a test of machine intelligence. Determining whether a system truly displays artificial intelligence is difficult and complex, and well-known assessments like the Turing Test are not suited to the task. The Allen Institute for Artificial Intelligence suggests that answering science exam questions successfully is a better measure of machine intelligence and designed a global competition to engage the research community in this approach. The outcome of the Allen AI Science Challenge highlights the current limitations of AI research in language understanding, reasoning, and commonsense knowledge; the highest scores are still limited to the capabilities of information-retrieval methods.},
  added-at  = {2017-10-30T12:22:28.000+0100},
  author    = {Schoenick, Carissa and Clark, Peter and Tafjord, Oyvind and Turney, Peter and Etzioni, Oren},
  biburl    = {https://www.bibsonomy.org/bibtex/27449469f20da96cce0841d04ba0833c1/flint63},
  doi       = {10.1145/3122814},
  file      = {ACM Digital Library:2017/SchoenickClarkEtAl17cacm.pdf:PDF},
  groups    = {public},
  interhash = {fb58dac715f73f745413e64f380cd53c},
  intrahash = {7449469f20da96cce0841d04ba0833c1},
  issn      = {0001-0782},
  journal   = {Communications of the ACM},
  keywords  = {01801 acm paper ai system test language processing answer},
  month     = sep,
  number    = 9,
  pages     = {60--64},
  timestamp = {2018-04-16T12:10:10.000+0200},
  title     = {Moving Beyond the {Turing} Test with the {Allen AI} {Science Challenge}},
  username  = {flint63},
  volume    = 60,
  year      = 2017
}