We consider the classical problem of sequential probability assignment under
logarithmic loss while competing against an arbitrary, potentially
nonparametric class of experts. We obtain tight bounds on the minimax regret
via a new approach that exploits the self-concordance property of the
logarithmic loss. We show that for any expert class with (sequential) metric
entropy $\mathcal{O}(\gamma^{-p})$ at scale $\gamma$, the minimax regret is
$\mathcal{O}(n^{p/(p+1)})$, and that this rate cannot be improved without
additional assumptions on the expert class under consideration. As an
application of our techniques, we resolve the minimax regret for nonparametric
Lipschitz classes of experts.
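For context (our illustration, not text from the paper): plugging in the class of 1-Lipschitz experts on $[0,1]^d$, whose metric entropy scales as $\gamma^{-d}$, the stated rate gives minimax regret $\mathcal{O}(n^{d/(d+1)})$. The sketch below, a minimal assumption-laden Python example rather than the paper's algorithm, shows the underlying protocol: at each round the learner assigns a probability to the next outcome, pays logarithmic loss $-\log p_t(y_t)$, and regret is measured against the best expert in hindsight. For a finite class of $K$ experts, the classical Bayes-mixture forecaster already achieves regret at most $\log K$; the paper's contribution concerns rich (nonparametric) classes, where the regret necessarily grows with $n$. The expert set, data sequence, and names here are illustrative assumptions.

# A minimal sketch (not the paper's method): sequential probability
# assignment under logarithmic loss with a Bayes-mixture forecaster
# over a finite expert class, on binary outcomes.
import numpy as np

rng = np.random.default_rng(0)

n = 1000                      # horizon
experts = [0.2, 0.5, 0.8]     # each expert always predicts this P(y_t = 1)
K = len(experts)

log_weights = np.zeros(K)     # uniform prior over experts, in log space
learner_loss = 0.0
expert_loss = np.zeros(K)

for t in range(n):
    # Learner predicts the posterior mixture of the experts' probabilities.
    w = np.exp(log_weights - log_weights.max())
    w /= w.sum()
    p = float(np.dot(w, experts))        # learner's P(y_t = 1)

    y = int(rng.random() < 0.7)          # an arbitrary data sequence

    # Logarithmic loss: -log of the probability assigned to the outcome.
    q = np.array(experts)
    lik = np.where(y == 1, q, 1.0 - q)   # each expert's P(observed y_t)
    learner_loss += -np.log(p if y == 1 else 1.0 - p)
    expert_loss += -np.log(lik)

    # Bayesian update: reweight experts by their likelihood of y_t.
    log_weights += np.log(lik)

regret = learner_loss - expert_loss.min()
print(f"regret vs. best expert: {regret:.3f} (<= log K = {np.log(K):.3f})")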
%0 Journal Article
%1 bilodeau2020tight
%A Bilodeau, Blair
%A Foster, Dylan J.
%A Roy, Daniel M.
%D 2020
%K bounds game-theory optimization readings
%T Tight Bounds on Minimax Regret under Logarithmic Loss via
Self-Concordance
%U http://arxiv.org/abs/2007.01160
%X We consider the classical problem of sequential probability assignment under
logarithmic loss while competing against an arbitrary, potentially
nonparametric class of experts. We obtain tight bounds on the minimax regret
via a new approach that exploits the self-concordance property of the
logarithmic loss. We show that for any expert class with (sequential) metric
entropy $\mathcal{O}(\gamma^{-p})$ at scale $\gamma$, the minimax regret is
$\mathcal{O}(n^{p/(p+1)})$, and that this rate cannot be improved without
additional assumptions on the expert class under consideration. As an
application of our techniques, we resolve the minimax regret for nonparametric
Lipschitz classes of experts.
@article{bilodeau2020tight,
abstract = {We consider the classical problem of sequential probability assignment under
logarithmic loss while competing against an arbitrary, potentially
nonparametric class of experts. We obtain tight bounds on the minimax regret
via a new approach that exploits the self-concordance property of the
logarithmic loss. We show that for any expert class with (sequential) metric
entropy $\mathcal{O}(\gamma^{-p})$ at scale $\gamma$, the minimax regret is
$\mathcal{O}(n^{p/(p+1)})$, and that this rate cannot be improved without
additional assumptions on the expert class under consideration. As an
application of our techniques, we resolve the minimax regret for nonparametric
Lipschitz classes of experts.},
added-at = {2020-08-04T14:12:40.000+0200},
author = {Bilodeau, Blair and Foster, Dylan J. and Roy, Daniel M.},
biburl = {https://www.bibsonomy.org/bibtex/22ead35ed14d4dd8bf2407ff90799b737/kirk86},
description = {[2007.01160] Tight Bounds on Minimax Regret under Logarithmic Loss via Self-Concordance},
interhash = {21dfe044843f49d65b9bb626c88bc6c8},
intrahash = {2ead35ed14d4dd8bf2407ff90799b737},
keywords = {bounds game-theory optimization readings},
note = {cite arxiv:2007.01160. Comment: 25 pages},
timestamp = {2020-08-04T14:12:40.000+0200},
title = {Tight Bounds on Minimax Regret under Logarithmic Loss via
Self-Concordance},
url = {http://arxiv.org/abs/2007.01160},
year = 2020
}