There has been significant progress in unsupervised network representation learning (UNRL) approaches over graphs recently with flexible random-walk approaches, new optimization objectives and deep architectures. However, there is no common ground for systematic comparison of embeddings to understand their behavior for different graphs and tasks. We argue that most of the UNRL approaches either model and exploit neighborhood or what we call context information of a node. These methods largely differ in their definitions and exploitation of context. Consequently, we propose a framework that casts a variety of approaches – random walk based, matrix factorization and deep learning based – into a unified context-based optimization function. We systematically group the methods based on their similarities and differences. We study their differences which we later use to explain their performance differences (on downstream tasks).
IEEE Transactions on Knowledge and Data Engineering
pages
1--1
language
en
file
Khosla et al - A Comparative Study for Unsupervised Network Representation Learning.pdf:C\:\\Users\\Admin\\Documents\\Research\\_Paperbase\\Graph Embeddings\\Khosla et al - A Comparative Study for Unsupervised Network Representation Learning.pdf:application/pdf
%0 Journal Article
%1 khosla_comparative_2019
%A Khosla, Megha
%A Setty, Vinay
%A Anand, Avishek
%D 2019
%J IEEE Transactions on Knowledge and Data Engineering
%K Embedding_Algorithm Node_Embeddings Survey
%P 1--1
%R 10.1109/TKDE.2019.2951398
%T A Comparative Study for Unsupervised Network Representation Learning
%U http://arxiv.org/abs/1903.07902
%X There has been significant progress in unsupervised network representation learning (UNRL) approaches over graphs recently with flexible random-walk approaches, new optimization objectives and deep architectures. However, there is no common ground for systematic comparison of embeddings to understand their behavior for different graphs and tasks. We argue that most of the UNRL approaches either model and exploit neighborhood or what we call context information of a node. These methods largely differ in their definitions and exploitation of context. Consequently, we propose a framework that casts a variety of approaches – random walk based, matrix factorization and deep learning based – into a unified context-based optimization function. We systematically group the methods based on their similarities and differences. We study their differences which we later use to explain their performance differences (on downstream tasks).
@article{khosla_comparative_2019,
  abstract   = {There has been significant progress in unsupervised network representation learning (UNRL) approaches over graphs recently with flexible random-walk approaches, new optimization objectives and deep architectures. However, there is no common ground for systematic comparison of embeddings to understand their behavior for different graphs and tasks. We argue that most of the UNRL approaches either model and exploit neighborhood or what we call context information of a node. These methods largely differ in their definitions and exploitation of context. Consequently, we propose a framework that casts a variety of approaches – random walk based, matrix factorization and deep learning based – into a unified context-based optimization function. We systematically group the methods based on their similarities and differences. We study their differences which we later use to explain their performance differences (on downstream tasks).},
  added-at   = {2020-02-21T16:09:44.000+0100},
  author     = {Khosla, Megha and Setty, Vinay and Anand, Avishek},
  biburl     = {https://www.bibsonomy.org/bibtex/2ee886307cb2b82c477e87286ee0e53b0/tschumacher},
  doi        = {10.1109/TKDE.2019.2951398},
  eprint     = {1903.07902},
  eprinttype = {arXiv},
  file       = {Khosla et al - A Comparative Study for Unsupervised Network Representation Learning.pdf:C\:\\Users\\Admin\\Documents\\Research\\_Paperbase\\Graph Embeddings\\Khosla et al - A Comparative Study for Unsupervised Network Representation Learning.pdf:application/pdf},
  interhash  = {8b31963a3c9d5db1f872f2e84b842ce8},
  intrahash  = {ee886307cb2b82c477e87286ee0e53b0},
  issn       = {1041-4347, 1558-2191, 2326-3865},
  journal    = {IEEE Transactions on Knowledge and Data Engineering},
  keywords   = {Embedding_Algorithm Node_Embeddings Survey},
  language   = {en},
  pages      = {1--1},
  timestamp  = {2020-02-21T16:09:44.000+0100},
  title      = {A Comparative Study for Unsupervised Network Representation Learning},
  url        = {http://arxiv.org/abs/1903.07902},
  urldate    = {2019-12-10},
  year       = {2019},
}