0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
function [xR, residuum, gradnorm, cost, times] = RiemannLinsolve( L, F, X0, Lh, Ph, opts )
% RIEMANNLINSOLVE  Preconditioned Riemannian steepest descent for L(X) = F.
%
%   [xR, residuum, gradnorm, cost, times] = RiemannLinsolve(L, F, X0, Lh, Ph, opts)
%
%   Iteratively minimizes the quadratic cost 0.5*<X, L(X) - F> over the
%   manifold iterates, using a tangent-space projected gradient, the
%   preconditioner SOLVEPRECOND_NOSADDLE, and an exact linearized line search.
%
%   Inputs:
%     L      - linear operator, applied through APPLY (project-defined)
%     F      - right-hand side
%     X0     - initial iterate (exposes gauge via GAUGE_MATRICES)
%     Lh, Ph - operator/preconditioner data forwarded to SOLVEPRECOND_NOSADDLE
%     opts   - options struct, all fields optional:
%       .maxiter         maximum outer iterations              (default 100)
%       .tol             relative-residual stopping tolerance  (default 1e-16)
%       .gradtol         relative-gradnorm stopping tolerance  (default 1e-16)
%       .precond_tol     inner solver tolerance                (default 1e-5)
%       .precond_maxit   inner solver iteration cap            (default 5)
%       .safe_norm       flag forwarded to NORM                (default false)
%       .truncatedNewton adaptive inner tolerance; overrides
%                        precond_tol/precond_maxit             (default false)
%       .tediousPrec     forwarded to the preconditioner       (default false)
%       .stagtol         stop when residuum(i) > stagtol*residuum(i-1)
%                                                              (default inf)
%
%   Outputs:
%     xR       - final iterate
%     residuum - relative residual norm per iteration
%     gradnorm - Riemannian gradient norm per iteration
%     cost     - cost-function value per iteration
%     times    - cumulative wall-clock time per iteration
%   The history vectors are truncated to the performed iteration count
%   whenever the loop exits early.

% Fill in missing option fields with their defaults.
if ~exist( 'opts', 'var'); opts = struct(); end
if ~isfield( opts, 'maxiter'); opts.maxiter = 100; end
if ~isfield( opts, 'tol'); opts.tol = 1e-16; end
if ~isfield( opts, 'gradtol'); opts.gradtol = 1e-16; end
if ~isfield( opts, 'precond_tol'); opts.precond_tol = 1e-5; end
if ~isfield( opts, 'precond_maxit'); opts.precond_maxit = 5; end
if ~isfield( opts, 'safe_norm'); opts.safe_norm = false; end
if ~isfield( opts, 'truncatedNewton'); opts.truncatedNewton = false; end
if ~isfield( opts, 'tediousPrec'); opts.tediousPrec = false; end
if ~isfield( opts, 'stagtol'); opts.stagtol = inf; end

if opts.truncatedNewton
    disp('Using truncated Newton with adaptive tolerance; not considering opts.precond_tol and opts.precond_maxit!')
    opts.precond_tol = NaN;      % set adaptively inside the loop
    opts.precond_maxit = 100;
end

[xL, xR, G] = gauge_matrices( X0 );

% Preallocate per-iteration statistics.
cost = zeros(opts.maxiter, 1);
residuum = zeros(opts.maxiter, 1);
gradnorm = zeros(opts.maxiter, 1);
times = zeros(opts.maxiter, 1);
normRHS = norm(F);

prev_res = inf;

t_start = tic;

for i = 1:opts.maxiter

    % Euclidean gradient of the quadratic cost: L(xR) - F (also the residual).
    g = euclid_grad( L, xR, F );

    cost(i) = cost_function_res( xR, g );
    residuum(i) = norm(g, opts.safe_norm) / normRHS;
    times(i) = toc(t_start);

    % Project the Euclidean gradient onto the tangent space.
    grad = TTeMPS_tangent_orth( xL, xR, g );
    gradnorm(i) = norm( grad );
    fprintf('Iteration %i\n', i);
    if opts.truncatedNewton
        % Adaptive inner tolerance, capped at 0.5 (truncated-Newton style).
        opts.precond_tol = min(0.5, sqrt(gradnorm(i)));
        fprintf('... current truncated Newton tolerance %g\n', opts.precond_tol);
        fprintf('... current rel. gradnorm %g, stopping at %g\n', gradnorm(i) / normRHS, opts.gradtol);
    end

    % Stopping test 1: relative residual small enough.
    if abs(residuum(i)) < opts.tol
        fprintf( 'Current residual: %g\n', residuum(i));
        gradnorm = gradnorm(1:i);
        residuum = residuum(1:i);
        cost = cost(1:i);
        times = times(1:i);
        fprintf( 'RiemannLinsolve CONVERGED after %i iterations\n', i );
        break
    end

    % Stopping test 2: relative Riemannian gradient norm small enough.
    if abs(gradnorm(i)/normRHS) < opts.gradtol
        fprintf( 'Current norm of Riem. gradient: %g\n', gradnorm(i));
        gradnorm = gradnorm(1:i);
        residuum = residuum(1:i);
        cost = cost(1:i);
        times = times(1:i);
        fprintf( 'RiemannLinsolve CONVERGED after %i iterations\n', i );
        break
    end

    fprintf('Residual reduction factor: %g\n', residuum(i)/prev_res);

    % Stopping test 3: stagnation (insufficient residual reduction).
    if residuum(i) > opts.stagtol*prev_res
        fprintf( 'Current gradnorm reduction: %g\n', residuum(i)/prev_res );
        gradnorm = gradnorm(1:i);
        residuum = residuum(1:i);
        cost = cost(1:i);
        times = times(1:i);
        fprintf( 'RiemannLinsolve STAGNATED after %i iterations\n', i );
        break
    end

    % Preconditioned search direction, exact linearized step length,
    % retraction, and re-orthogonalization for the next iteration.
    P_grad = solvePrecond_noSaddle( Lh, Ph, grad, xL, xR, opts, G );

    alpha = linesearch_linearized( L, P_grad, g );

    xR = tangentAdd( P_grad, alpha, true );
    [xL, G] = left_orth_with_gauge( xR );
    prev_res = residuum(i);
end

end
0119
function c = cost_function_res( X, res )
% Cost value 0.5*<X, res> given the precomputed Euclidean residual res.
    c = 0.5 * innerprod( X, res );
end
0123
function g = euclid_grad( L, X, F )
% Euclidean gradient of the quadratic cost: g = L(X) - F (the residual).
    g = apply( L, X );
    g = g - F;
end
0127
function alpha = linesearch_linearized( L, xi, g )
% Exact step length along tangent direction xi for the quadratic cost:
% alpha = -<eta, g> / <eta, L(eta)>, where eta is xi lifted to TTeMPS form.
    eta = tangent_to_TTeMPS( xi );
    Leta = apply( L, eta );
    alpha = -innerprod( eta, g ) / innerprod( eta, Leta );
end