Home > manopt > manifolds > fixedrank > fixedrankfactory_2factors_preconditioned.m

fixedrankfactory_2factors_preconditioned

PURPOSE ^

Manifold of m-by-n matrices of rank k with two factor quotient geometry.

SYNOPSIS ^

function M = fixedrankfactory_2factors_preconditioned(m, n, k)

DESCRIPTION ^

 Manifold of m-by-n matrices of rank k with two factor quotient geometry.

 function M = fixedrankfactory_2factors_preconditioned(m, n, k)

 This geometry is tuned to least-squares problems such as low-rank matrix
 completion with ell-2 loss.

 A point X on the manifold is represented as a structure with two
 fields: L and R. The matrices L (m-by-k) and R (n-by-k) are 
 full column-rank matrices such that X = L*R'.

 Tangent vectors are represented as a structure with two fields: L, R.
 
 Please cite the Manopt paper as well as the research paper:
     @Techreport{mishra2012optimized,
       Title   = {A {R}iemannian geometry for low-rank matrix completion},
       Author  = {Mishra, B. and Adithya Apuroop, K. and Sepulchre, R.},
       Journal = {Arxiv preprint arXiv:1211.1550},
       Year    = {2012}
     }


 See also: fixedrankembeddedfactory fixedrankfactory_2factors fixedrankfactory_3factors_preconditioned

CROSS-REFERENCE INFORMATION ^

This function calls: (none listed)
This function is called by: (none listed)

SUBFUNCTIONS ^

SOURCE CODE ^

0001 function M = fixedrankfactory_2factors_preconditioned(m, n, k)
0002 % Manifold of m-by-n matrices of rank k with two factor quotient geometry.
0003 %
0004 % function M = fixedrankfactory_2factors_preconditioned(m, n, k)
0005 %
0006 % This geometry is tuned to least-squares problems such as low-rank matrix
0007 % completion with ell-2 loss.
0008 %
0009 % A point X on the manifold is represented as a structure with two
0010 % fields: L and R. The matrices L (m-by-k) and R (n-by-k) are
0011 % full column-rank matrices such that X = L*R'.
0012 %
0013 % Tangent vectors are represented as a structure with two fields: L, R.
0014 %
0015 % Please cite the Manopt paper as well as the research paper:
0016 %     @Techreport{mishra2012optimized,
0017 %       Title   = {A {R}iemannian geometry for low-rank matrix completion},
0018 %       Author  = {Mishra, B. and Adithya Apuroop, K. and Sepulchre, R.},
0019 %       Journal = {Arxiv preprint arXiv:1211.1550},
0020 %       Year    = {2012}
0021 %     }
0022 %
0023 %
0024 % See also: fixedrankembeddedfactory fixedrankfactory_2factors fixedrankfactory_3factors_preconditioned
0025 
0026 % This file is part of Manopt: www.manopt.org.
0027 % Original author: Bamdev Mishra, Dec. 30, 2012.
0028 % Contributors:
0029 % Change log:
0030 %
0031 %    Apr. 4, 2015 (BM):
0032 %        Cosmetic changes including avoiding storing the inverse of a
0033 %        k-by-k matrix.
0034 %
0035 %    Sep. 6, 2018 (NB):
0036 %        Removed M.exp() as it was not implemented.
0037   
0038     
0039     M.name = @() sprintf('LR''(tuned to least square problems) quotient manifold of %dx%d matrices of rank %d', m, n, k);
0040     
0041     M.dim = @() (m+n-k)*k;
0042     
0043     
0044     % Some precomputations at the point X to be used in the inner product
0045     % (and pretty much everywhere else): the k-by-k Gram matrices L'*L and
0046     % R'*R are cached on X the first time they are needed.
0046     function X = prepare(X)
0047         if ~all(isfield(X,{'LtL','RtR'}))
0048             L = X.L;
0049             R = X.R;
0050             X.LtL = L'*L;
0051             X.RtR = R'*R;
0052         end
0053     end
0054     
0055     
0056     % The choice of metric is motivated by symmetry and
0057     % tuned to least-squares cost function.
0058     M.inner = @iproduct;
0059     function ip = iproduct(X, eta, zeta)
0060         X = prepare(X);
0061         ip = trace(X.RtR*(eta.L'*zeta.L)) + trace(X.LtL*(eta.R'*zeta.R)); % Scaled metric: each factor is weighted by the other factor's Gram matrix.
0062     end
0063     
0064     M.norm = @(X, eta) sqrt(M.inner(X, eta, eta));
0065     
0066     M.dist = @(x, y) error('fixedrankfactory_2factors_preconditioned.dist not implemented yet.');
0067     
0068     M.typicaldist = @() 10*k; % Rough heuristic scale, not an actual geodesic distance.
0069     
0070     M.egrad2rgrad = @egrad2rgrad;
0071     function rgrad = egrad2rgrad(X, egrad)
0072         X = prepare(X);
0073         
0074         % Riemannian gradient: the Euclidean gradient scaled by the
0075         % inverse Gram matrices, consistent with the metric above.
0075         rgrad.L = egrad.L/X.RtR;
0076         rgrad.R = egrad.R/X.LtL;
0077     end
0078     
0079     M.ehess2rhess = @ehess2rhess;
0080     function Hess = ehess2rhess(X, egrad, ehess, eta)
0081         X = prepare(X);
0082         
0083         % Riemannian gradient.
0084         rgrad = egrad2rgrad(X, egrad);
0085         
0086         % Directional derivative of the Riemannian gradient.
0087         Hess.L = ehess.L/X.RtR - 2*egrad.L*(X.RtR \ (symm(eta.R'*X.R) / X.RtR));
0088         Hess.R = ehess.R/X.LtL - 2*egrad.R*(X.LtL \ (symm(eta.L'*X.L) / X.LtL));
0089         
0090         % We still need a correction factor for the non-constant metric.
0091         Hess.L = Hess.L + rgrad.L*(symm(eta.R'*X.R)/X.RtR) + eta.L*(symm(rgrad.R'*X.R)/X.RtR) - X.L*(symm(eta.R'*rgrad.R)/X.RtR);
0092         Hess.R = Hess.R + rgrad.R*(symm(eta.L'*X.L)/X.LtL) + eta.R*(symm(rgrad.L'*X.L)/X.LtL) - X.R*(symm(eta.L'*rgrad.L)/X.LtL);
0093         
0094         % Project on the horizontal space.
0095         Hess = M.proj(X, Hess);
0096     end
0097     
0098     M.proj = @projection;
0099     function etaproj = projection(X, eta)
0100         X = prepare(X);
0101         
0102         % Projection onto the horizontal space.
0103         Lambda = 0.5*((eta.R'*X.R)/X.RtR  -   X.LtL\(X.L'*eta.L));
0104         etaproj.L = eta.L + X.L*Lambda;
0105         etaproj.R = eta.R - X.R*Lambda';
0106     end
0107     
0108     M.tangent = M.proj;
0109     
0110     M.tangent2ambient = @(X, eta) eta;
0111     
0112     M.retr = @retraction;
0113     function Y = retraction(X, eta, t)
0114         if nargin < 3
0115             t = 1.0;
0116         end
0117         Y.L = X.L + t*eta.L;
0118         Y.R = X.R + t*eta.R;
0119         
0120         % Numerical conditioning step: a simpler version.
0121         % We need to ensure that the norms of L and R do not become
0122         % very skewed relative to each other.
0123         
0124         scaling = norm(X.L, 'fro')/norm(X.R, 'fro');
0125         scaling = sqrt(scaling);
0126         Y.L = Y.L / scaling;
0127         Y.R = Y.R * scaling;
0128         
0129         % These are reused in the computations of gradient and Hessian.
0130         Y = prepare(Y);
0131     end
0132     
0133     
0134     M.hash = @(X) ['z' hashmd5([X.L(:) ; X.R(:)])];
0135     
0136     M.rand = @random;
0137     
0138     % Random point: Gaussian factors (full rank with probability 1).
0138     function X = random()
0139         X.L = randn(m, k);
0140         X.R = randn(n, k);
0141     end
0142     
0143     M.randvec = @randomvec;
0144     % Random unit-norm tangent vector at X: Gaussian ambient direction,
0145     % projected to the horizontal space and normalized in the scaled metric.
0144     function eta = randomvec(X)
0145         eta.L = randn(m, k);
0146         eta.R = randn(n, k);
0147         eta = projection(X, eta);
0148         nrm = M.norm(X, eta);
0149         eta.L = eta.L / nrm;
0150         eta.R = eta.R / nrm;
0151     end
0152     
0153     M.lincomb = @lincomb;
0154     
0155     M.zerovec = @(X) struct('L', zeros(m, k),'R', zeros(n, k));
0156     
0157     M.transp = @(x1, x2, d) projection(x2, d);
0158     
0159     % vec and mat are not isometries, because of the scaled inner metric.
0160     M.vec = @(X, U) [U.L(:) ; U.R(:)];
0161     
0162     M.mat = @(X, u) struct('L', reshape(u(1:(m*k)), m, k), ...
0163         'R', reshape(u((m*k+1):end), n, k));
0164     
0165     M.vecmatareisometries = @() false;
0166     
0167     % Auxiliary functions
0168     symm = @(M) .5*(M+M'); % Symmetric part of a square matrix.
0169 end
0170 
0171 % Linear combination of tangent vectors: d = a1*d1 (3 arguments) or
0172 % d = a1*d1 + a2*d2 (5 arguments). The base point x is unused.
0172 function d = lincomb(x, a1, d1, a2, d2) %#ok<INUSL>
0173     
0174     if nargin == 3
0175         d.L = a1*d1.L;
0176         d.R = a1*d1.R;
0177     elseif nargin == 5
0178         d.L = a1*d1.L + a2*d2.L;
0179         d.R = a1*d1.R + a2*d2.R;
0180     else
0181         error('Bad use of fixedrankfactory_2factors_preconditioned.lincomb.');
0182     end
0183     
0184 end
0185 
0186 
0187 
0188 
0189

Generated on Fri 30-Sep-2022 13:18:25 by m2html © 2005