We give an in-depth account of compositional matrix-space models (CMSMs), a type of generic models for natural language, wherein compositionality is realized via matrix multiplication. We argue for the structural plausibility of this model and show that it is able to cover and combine various common compositional natural language processing approaches. Then, we consider efficient task-specific learning methods for training CMSMs and evaluate their performance in compositionality prediction and sentiment analysis.
Description
Compositional matrix-space models of language: Definitions, properties, and learning methods – International Center for Computational Logic
%0 Journal Article
%1 asaadi2021compositional
%A Asaadi, Shima
%A Giesbrecht, Eugenie
%A Rudolph, Sebastian
%D 2021
%I Cambridge University Press
%J Natural Language Engineering
%K deeplearning embedding matrix nlp vector word
%P 1--49
%R 10.1017/S1351324921000206
%T Compositional matrix-space models of language: Definitions, properties, and learning methods
%X We give an in-depth account of compositional matrix-space models (CMSMs), a type of generic models for natural language, wherein compositionality is realized via matrix multiplication. We argue for the structural plausibility of this model and show that it is able to cover and combine various common compositional natural language processing approaches. Then, we consider efficient task-specific learning methods for training CMSMs and evaluate their performance in compositionality prediction and sentiment analysis.
@article{asaadi2021compositional,
  author      = {Asaadi, Shima and Giesbrecht, Eugenie and Rudolph, Sebastian},
  title       = {Compositional matrix-space models of language: Definitions, properties, and learning methods},
  journal     = {Natural Language Engineering},
  year        = 2021,
  month       = aug,
  pages       = {1--49},
  publisher   = {Cambridge University Press},
  doi         = {10.1017/S1351324921000206},
  keywords    = {deeplearning embedding matrix nlp vector word},
  abstract    = {We give an in-depth account of compositional matrix-space models (CMSMs), a type of generic models for natural language, wherein compositionality is realized via matrix multiplication. We argue for the structural plausibility of this model and show that it is able to cover and combine various common compositional natural language processing approaches. Then, we consider efficient task-specific learning methods for training CMSMs and evaluate their performance in compositionality prediction and sentiment analysis.},
  description = {Compositional matrix-space models of language: Definitions, properties, and learning methods – International Center for Computational Logic},
  added-at    = {2021-09-07T13:18:23.000+0200},
  timestamp   = {2021-09-07T13:18:23.000+0200},
  biburl      = {https://www.bibsonomy.org/bibtex/221f06cbe0a08f0d03a71281028d37338/jaeschke},
  interhash   = {4726143a824964bbf895ec70768ee15e},
  intrahash   = {21f06cbe0a08f0d03a71281028d37338},
}