Synthesis of Neural Networks: the Case of Cascaded Hebbians
Cs. Szepesvári. 96-102. Research Group on Artificial Intelligence, JATE-MTA, Szeged 6720, Aradi vrt tere 1., HUNGARY, (August 1996)
Abstract
We show that cascading Hebbian learning with any other convergent algorithm (called the forward algorithm) results in the convergence of the Hebbian weights to a stationary point where the Hebbian algorithm would converge if the weights of the forward algorithm had already converged. Further, it is shown that the convergence rate of the composite algorithm does not deteriorate because of the cascading. This result is a consequence of a more general theorem which is also stated and proved here, the proofs being based on a global Lipschitzian assumption. The theory is illustrated by a composite PCA-Hebbian architecture introduced by Michaels (Michaels, 1995).
%0 Report
%1 szepesvari1996e
%A Szepesvári, Cs.
%C Szeged 6720, Aradi vrt tere 1., HUNGARY
%D 1996
%K PCA approximation, networks, neural stochastic two-timescale
%N 96-102
%T Synthesis of Neural Networks: the Case of Cascaded Hebbians
%X We show that cascading Hebbian learning with any other convergent algorithm (called the forward algorithm) results in the convergence of the Hebbian weights to a stationary point where the Hebbian algorithm would converge if the weights of the forward algorithm had already converged. Further, it is shown that the convergence rate of the composite algorithm does not deteriorate because of the cascading. This result is a consequence of a more general theorem which is also stated and proved here, the proofs being based on a global Lipschitzian assumption. The theory is illustrated by a composite PCA-Hebbian architecture introduced by Michaels (Michaels, 1995).
@techreport{szepesvari1996e,
  abstract      = {We show that cascading Hebbian learning with any other convergent algorithm (called the forward algorithm) results in the convergence of the Hebbian weights to a stationary point where the Hebbian algorithm would converge if the weights of the forward algorithm had already converged. Further, it is shown that the convergence rate of the composite algorithm does not deteriorate because of the cascading. This result is a consequence of a more general theorem which is also stated and proved here, the proofs being based on a global Lipschitzian assumption. The theory is illustrated by a composite PCA-Hebbian architecture introduced by Michaels (Michaels, 1995).},
  added-at      = {2020-03-17T03:03:01.000+0100},
  address       = {Szeged 6720, Aradi vrt tere 1., HUNGARY},
  author        = {Szepesv{\'a}ri, Csaba},
  biburl        = {https://www.bibsonomy.org/bibtex/22643cc2b294b97426f7af34d17df264d/csaba},
  date-added    = {2010-08-28 17:38:14 -0600},
  date-modified = {2010-09-02 13:09:16 -0600},
  institution   = {Research Group on Artificial Intelligence, JATE-MTA},
  interhash     = {314ede68aea991a52a1f2f98dec250da},
  intrahash     = {2643cc2b294b97426f7af34d17df264d},
  keywords      = {PCA approximation, networks, neural stochastic two-timescale},
  month         = aug,
  note          = {e-mail: szepes@math.u-szeged.hu},
  number        = {96-102},
  pdf           = {papers/TR96-102.pdf},
  timestamp     = {2020-03-17T03:03:01.000+0100},
  title         = {Synthesis of Neural Networks: the Case of Cascaded {Hebbians}},
  year          = 1996
}