Sequential data often possesses a hierarchical structure with complex
dependencies between subsequences, such as found between the utterances in a
dialogue. In an effort to model this kind of generative process, we propose a
neural network-based generative architecture, with latent stochastic variables
that span a variable number of time steps. We apply the proposed model to the
task of dialogue response generation and compare it with recent neural network
architectures. We evaluate the model performance through automatic evaluation
metrics and by carrying out a human evaluation. The experiments demonstrate
that our model improves upon recently proposed models and that the latent
variables facilitate the generation of long outputs and maintain the context.
Description
[1605.06069] A Hierarchical Latent Variable Encoder-Decoder Model for Generating Dialogues
%0 Generic
%1 serban2016hierarchical
%A Serban, Iulian Vlad
%A Sordoni, Alessandro
%A Lowe, Ryan
%A Charlin, Laurent
%A Pineau, Joelle
%A Courville, Aaron C.
%A Bengio, Yoshua
%D 2016
%K deep dialogue learning machine multiresolution network neural recurrent systems
%T A Hierarchical Latent Variable Encoder-Decoder Model for Generating Dialogues
%U http://arxiv.org/abs/1605.06069
%X Sequential data often possesses a hierarchical structure with complex
dependencies between subsequences, such as found between the utterances in a
dialogue. In an effort to model this kind of generative process, we propose a
neural network-based generative architecture, with latent stochastic variables
that span a variable number of time steps. We apply the proposed model to the
task of dialogue response generation and compare it with recent neural network
architectures. We evaluate the model performance through automatic evaluation
metrics and by carrying out a human evaluation. The experiments demonstrate
that our model improves upon recently proposed models and that the latent
variables facilitate the generation of long outputs and maintain the context.
@misc{serban2016hierarchical,
  abstract      = {Sequential data often possesses a hierarchical structure with complex
dependencies between subsequences, such as found between the utterances in a
dialogue. In an effort to model this kind of generative process, we propose a
neural network-based generative architecture, with latent stochastic variables
that span a variable number of time steps. We apply the proposed model to the
task of dialogue response generation and compare it with recent neural network
architectures. We evaluate the model performance through automatic evaluation
metrics and by carrying out a human evaluation. The experiments demonstrate
that our model improves upon recently proposed models and that the latent
variables facilitate the generation of long outputs and maintain the context.},
  added-at      = {2017-08-22T23:19:05.000+0200},
  archiveprefix = {arXiv},
  author        = {Serban, Iulian Vlad and Sordoni, Alessandro and Lowe, Ryan and Charlin, Laurent and Pineau, Joelle and Courville, Aaron C. and Bengio, Yoshua},
  biburl        = {https://www.bibsonomy.org/bibtex/2dbda7b9c5a47c2c63d6a27c0e48713d9/porta},
  description   = {[1605.06069] A Hierarchical Latent Variable Encoder-Decoder Model for Generating Dialogues},
  eprint        = {1605.06069},
  interhash     = {15d057b55b7f1993bee2c7bcf38a8f80},
  intrahash     = {dbda7b9c5a47c2c63d6a27c0e48713d9},
  keywords      = {deep dialogue learning machine multiresolution network neural recurrent systems},
  note          = {15 pages, 5 tables, 4 figures},
  timestamp     = {2017-08-22T23:19:05.000+0200},
  title         = {A Hierarchical Latent Variable Encoder-Decoder Model for Generating Dialogues},
  url           = {http://arxiv.org/abs/1605.06069},
  year          = {2016},
}