diff --git a/pyttb/cp_als.py b/pyttb/cp_als.py
index 64ba3aff..3286ddf0 100644
--- a/pyttb/cp_als.py
+++ b/pyttb/cp_als.py
@@ -23,7 +23,7 @@ def cp_als(
 
     Parameters
     ----------
-    tensor: :class:`pyttb.tensor` or :class:`pyttb.sptensor` or :class:`pyttb.ktensor`
+    input_tensor: :class:`pyttb.tensor` or :class:`pyttb.sptensor` or :class:`pyttb.ktensor`
     rank: int
         Rank of the decomposition
     stoptol: float
diff --git a/pyttb/cp_apr.py b/pyttb/cp_apr.py
index 520d3ee7..89020a0c 100644
--- a/pyttb/cp_apr.py
+++ b/pyttb/cp_apr.py
@@ -14,7 +14,7 @@
 
 
 def cp_apr(
-    tensor,
+    input_tensor,
     rank,
     algorithm="mu",
     stoptol=1e-4,
@@ -38,7 +38,7 @@ def cp_apr(
 
     Parameters
    ----------
-    tensor: :class:`pyttb.tensor` or :class:`pyttb.sptensor`
+    input_tensor: :class:`pyttb.tensor` or :class:`pyttb.sptensor`
     rank: int
         Rank of the decomposition
     algorithm: str
@@ -85,12 +85,12 @@ def cp_apr(
     """
 
     # Extract the number of modes in tensor X
-    N = tensor.ndims
+    N = input_tensor.ndims
 
     assert rank > 0, "Number of components requested must be positive"
 
     # Check that the data is non-negative.
-    tmp = tensor < 0.0
+    tmp = input_tensor < 0.0
     assert (
         tmp.nnz == 0
     ), "Data tensor must be nonnegative for Poisson-based factorization"
@@ -103,7 +103,7 @@ def cp_apr(
             init.ncomponents == rank
         ), "Initial guess does not have the right number of components"
         for n in range(N):
-            if init.shape[n] != tensor.shape[n]:
+            if init.shape[n] != input_tensor.shape[n]:
                 assert False, "Mode {} of the initial guess is the wrong size".format(n)
             if np.min(init.factor_matrices[n]) < 0.0:
                 assert False, "Initial guess has negative element in mode {}".format(n)
@@ -113,13 +113,15 @@ def cp_apr(
     elif init.lower() == "random":
         factor_matrices = []
         for n in range(N):
-            factor_matrices.append(np.random.uniform(0, 1, (tensor.shape[n], rank)))
+            factor_matrices.append(
+                np.random.uniform(0, 1, (input_tensor.shape[n], rank))
+            )
         init = ttb.ktensor.from_factor_matrices(factor_matrices)
 
     # Call the solver based on the choice of algorithm parameter, passing all the other input parameters
     if algorithm.lower() == "mu":
         M, output = tt_cp_apr_mu(
-            tensor,
+            input_tensor,
             rank,
             init,
             stoptol,
@@ -135,7 +137,7 @@ def cp_apr(
         output["algorithm"] = "mu"
     elif algorithm.lower() == "pdnr":
         M, output = tt_cp_apr_pdnr(
-            tensor,
+            input_tensor,
             rank,
             init,
             stoptol,
@@ -153,7 +155,7 @@ def cp_apr(
         output["algorithm"] = "pdnr"
     elif algorithm.lower() == "pqnr":
         M, output = tt_cp_apr_pqnr(
-            tensor,
+            input_tensor,
             rank,
             init,
             stoptol,
@@ -175,7 +177,7 @@ def cp_apr(
 
 
 def tt_cp_apr_mu(
-    tensor,
+    input_tensor,
     rank,
     init,
     stoptol,
@@ -193,7 +195,7 @@ def tt_cp_apr_mu(
 
     Parameters
     ----------
-    tensor: :class:`pyttb.tensor` or :class:`pyttb.sptensor`
+    input_tensor: :class:`pyttb.tensor` or :class:`pyttb.sptensor`
     rank: int
         Rank of the decomposition
     init: :class:`pyttb.ktensor`
@@ -227,7 +229,7 @@ def tt_cp_apr_mu(
 
     URL: http://arxiv.org/abs/1112.2414. Submitted for publication.
 
""" - N = tensor.ndims + N = input_tensor.ndims # TODO I vote no duplicate error checking, copy error checking from cp_apr for initial guess here if disagree @@ -276,7 +278,7 @@ def tt_cp_apr_mu( # Calculate product of all matrices but the n-th # Sparse case only calculates entries corresponding to nonzeros in X - Pi = calculatePi(tensor, M, rank, n, N) + Pi = calculatePi(input_tensor, M, rank, n, N) # Do the multiplicative updates for i in range(maxinneriters): @@ -284,7 +286,7 @@ def tt_cp_apr_mu( nInnerIters[iter] += 1 # Calculate matrix for multiplicative update - Phi[n] = calculatePhi(tensor, M, rank, n, Pi, epsDivZero) + Phi[n] = calculatePhi(input_tensor, M, rank, n, Pi, epsDivZero) # Check for convergence kktModeViolations[n] = np.max( @@ -335,12 +337,12 @@ def tt_cp_apr_mu( # Clean up final result M.normalize(sort=True, normtype=1) - obj = tt_loglikelihood(tensor, M) + obj = tt_loglikelihood(input_tensor, M) if printitn > 0: - normTensor = tensor.norm() + normTensor = input_tensor.norm() normresidual = np.sqrt( - normTensor**2 + M.norm() ** 2 - 2 * tensor.innerprod(M) + normTensor**2 + M.norm() ** 2 - 2 * input_tensor.innerprod(M) ) fit = 1 - (normresidual / normTensor) # fraction explained by model print("===========================================\n") @@ -374,7 +376,7 @@ def tt_cp_apr_mu( def tt_cp_apr_pdnr( - tensor, + input_tensor, rank, init, stoptol, @@ -399,7 +401,7 @@ def tt_cp_apr_pdnr( Parameters ---------- # TODO it looks like this method of define union helps the typ hinting better than or - tensor: Union[:class:`pyttb.tensor`,:class:`pyttb.sptensor`] + input_tensor: Union[:class:`pyttb.tensor`,:class:`pyttb.sptensor`] rank: int Rank of the decomposition init: str or :class:`pyttb.ktensor` @@ -440,7 +442,7 @@ def tt_cp_apr_pdnr( """ # Extract the number of modes in tensor X - N = tensor.ndims + N = input_tensor.ndims # If the initial guess has any rows of all zero elements, then modify so the row subproblem is not taking log(0). # Values will be restored to zero later if the unfolded X for the row has no zeros. @@ -456,7 +458,7 @@ def tt_cp_apr_pdnr( M.normalize(normtype=1) # Sparse tensor flag affects how Pi and Phi are computed. - if isinstance(tensor, ttb.sptensor): + if isinstance(input_tensor, ttb.sptensor): isSparse = True else: isSparse = False @@ -487,7 +489,7 @@ def tt_cp_apr_pdnr( num_rows = M[n].shape[0] row_indices = [] for jj in range(num_rows): - row_indices.append(np.where(tensor.subs[:, n] == jj)[0]) + row_indices.append(np.where(input_tensor.subs[:, n] == jj)[0]) sparseIx.append(row_indices) if printitn > 0: @@ -511,8 +513,8 @@ def tt_cp_apr_pdnr( # calculate khatri-rao product of all matrices but the n-th if isSparse == False: # Data is not a sparse tensor. - Pi = ttb.tt_calcpi_prowsubprob(tensor, M, rank, n, N, isSparse) - X_mat = ttb.tt_to_dense_matrix(tensor, n) + Pi = ttb.tt_calcpi_prowsubprob(input_tensor, M, rank, n, N, isSparse) + X_mat = ttb.tt_to_dense_matrix(input_tensor, n) num_rows = M[n].shape[0] isRowNOTconverged = np.zeros((num_rows,)) @@ -526,7 +528,7 @@ def tt_cp_apr_pdnr( if isSparse: # Data is a sparse tensor if not precompinds: - sparse_indices = np.where(tensor.subs[:, n] == jj)[0] + sparse_indices = np.where(input_tensor.subs[:, n] == jj)[0] else: sparse_indices = sparseIx[n][jj] @@ -535,11 +537,11 @@ def tt_cp_apr_pdnr( M.factor_matrices[n][jj, :] = 0 continue - x_row = tensor.vals[sparse_indices] + x_row = input_tensor.vals[sparse_indices] # Calculate just the columns of Pi needed for this row. 
                     Pi = ttb.tt_calcpi_prowsubprob(
-                        tensor, M, rank, n, N, isSparse, sparse_indices
+                        input_tensor, M, rank, n, N, isSparse, sparse_indices
                     )
 
                 else:
@@ -663,7 +665,7 @@ def tt_cp_apr_pdnr(
 
         # Print outer iteration status.
         if printitn > 0 and np.mod(iter, printitn) == 0:
-            fnVals[iter] = -tt_loglikelihood(tensor, M)
+            fnVals[iter] = -tt_loglikelihood(input_tensor, M)
             print(
                 "{}. Ttl Inner Its: {}, KKT viol = {}, obj = {}, nz: {}\n".format(
                     iter,
@@ -690,12 +692,12 @@ def tt_cp_apr_pdnr(
 
     # Clean up final result
     M.normalize(sort=True, normtype=1)
-    obj = tt_loglikelihood(tensor, M)
+    obj = tt_loglikelihood(input_tensor, M)
 
     if printitn > 0:
-        normTensor = tensor.norm()
+        normTensor = input_tensor.norm()
         normresidual = np.sqrt(
-            normTensor**2 + M.norm() ** 2 - 2 * tensor.innerprod(M)
+            normTensor**2 + M.norm() ** 2 - 2 * input_tensor.innerprod(M)
         )
         fit = 1 - (normresidual / normTensor)  # fraction explained by model
         print("===========================================\n")
@@ -732,7 +734,7 @@ def tt_cp_apr_pdnr(
 
 
 def tt_cp_apr_pqnr(
-    tensor,
+    input_tensor,
     rank,
     init,
     stoptol,
@@ -769,7 +771,7 @@ def tt_cp_apr_pqnr(
 
     Parameters
     ----------
-    tensor: Union[:class:`pyttb.tensor`,:class:`pyttb.sptensor`]
+    input_tensor: Union[:class:`pyttb.tensor`,:class:`pyttb.sptensor`]
     rank: int
         Rank of the decomposition
     init: str or :class:`pyttb.ktensor`
@@ -808,7 +810,7 @@ def tt_cp_apr_pqnr(
     """
     # TODO first ~100 lines are identical to PDNR, consider abstracting just the algorithm portion
     # Extract the number of modes in data tensor
-    N = tensor.ndims
+    N = input_tensor.ndims
 
     # If the initial guess has any rows of all zero elements, then modify so the row subproblem is not taking log(0).
     # Values will be restored to zero later if the unfolded X for the row has no zeros.
@@ -824,7 +826,7 @@ def tt_cp_apr_pqnr(
     M.normalize(normtype=1)
 
     # Sparse tensor flag affects how Pi and Phi are computed.
-    if isinstance(tensor, ttb.sptensor):
+    if isinstance(input_tensor, ttb.sptensor):
         isSparse = True
     else:
         isSparse = False
@@ -855,7 +857,7 @@ def tt_cp_apr_pqnr(
             num_rows = M[n].shape[0]
             row_indices = []
             for jj in range(num_rows):
-                row_indices.append(np.where(tensor.subs[:, n] == jj)[0])
+                row_indices.append(np.where(input_tensor.subs[:, n] == jj)[0])
             sparseIx.append(row_indices)
 
     if printitn > 0:
@@ -875,8 +877,8 @@ def tt_cp_apr_pqnr(
             # calculate khatri-rao product of all matrices but the n-th
             if isSparse == False:
                 # Data is not a sparse tensor.
-                Pi = ttb.tt_calcpi_prowsubprob(tensor, M, rank, n, N, isSparse)
-                X_mat = ttb.tt_to_dense_matrix(tensor, n)
+                Pi = ttb.tt_calcpi_prowsubprob(input_tensor, M, rank, n, N, isSparse)
+                X_mat = ttb.tt_to_dense_matrix(input_tensor, n)
 
             num_rows = M[n].shape[0]
             isRowNOTconverged = np.zeros((num_rows,))
@@ -887,7 +889,7 @@ def tt_cp_apr_pqnr(
                 if isSparse:
                     # Data is a sparse tensor
                     if not precompinds:
-                        sparse_indices = np.where(tensor.subs[:, n] == jj)[0]
+                        sparse_indices = np.where(input_tensor.subs[:, n] == jj)[0]
                     else:
                         sparse_indices = sparseIx[n][jj]
 
@@ -896,11 +898,11 @@ def tt_cp_apr_pqnr(
                         M.factor_matrices[n][jj, :] = 0
                         continue
 
-                    x_row = tensor.vals[sparse_indices]
+                    x_row = input_tensor.vals[sparse_indices]
 
                     # Calculate just the columns of Pi needed for this row.
                     Pi = ttb.tt_calcpi_prowsubprob(
-                        tensor, M, rank, n, N, isSparse, sparse_indices
+                        input_tensor, M, rank, n, N, isSparse, sparse_indices
                     )
 
                 else:
@@ -1071,7 +1073,7 @@ def tt_cp_apr_pqnr(
 
         # Print outer iteration status.
         if printitn > 0 and np.mod(iter, printitn) == 0:
-            fnVals[iter] = -tt_loglikelihood(tensor, M)
+            fnVals[iter] = -tt_loglikelihood(input_tensor, M)
             print(
                 "{}. Ttl Inner Its: {}, KKT viol = {}, obj = {}, nz: {}\n".format(
                     iter, nInnerIters[iter], kktViolations[iter], fnVals[iter], num_zero
@@ -1092,12 +1094,12 @@ def tt_cp_apr_pqnr(
 
     # Clean up final result
     M.normalize(sort=True, normtype=1)
-    obj = tt_loglikelihood(tensor, M)
+    obj = tt_loglikelihood(input_tensor, M)
 
     if printitn > 0:
-        normTensor = tensor.norm()
+        normTensor = input_tensor.norm()
         normresidual = np.sqrt(
-            normTensor**2 + M.norm() ** 2 - 2 * tensor.innerprod(M)
+            normTensor**2 + M.norm() ** 2 - 2 * input_tensor.innerprod(M)
         )
         fit = 1 - (normresidual / normTensor)  # fraction explained by model
         print("===========================================\n")
diff --git a/pyttb/hosvd.py b/pyttb/hosvd.py
index 4c9cb60b..51ccddfa 100644
--- a/pyttb/hosvd.py
+++ b/pyttb/hosvd.py
@@ -9,7 +9,7 @@
 
 
 def hosvd(
-    tensor,
+    input_tensor,
     tol: float,
     verbosity: float = 1,
     dimorder: Optional[List[int]] = None,
@@ -24,7 +24,7 @@ def hosvd(
 
     Parameters
     ----------
-    tensor: Tensor to factor
+    input_tensor: Tensor to factor
     tol: Relative error to stop at
     verbosity: Print level
     dimorder: Order to loop through dimensions
@@ -42,7 +42,7 @@ def hosvd(
     True
     """
     # In tucker_als this is N
-    d = tensor.ndims
+    d = input_tensor.ndims
 
     if ranks is not None:
         if len(ranks) != d:
@@ -68,7 +68,7 @@ def hosvd(
     if verbosity > 0:
         print("Computing HOSVD...\n")
 
-    normxsqr = (tensor**2).collapse()
+    normxsqr = (input_tensor**2).collapse()
     eigsumthresh = ((tol**2) * normxsqr) / d
 
     if verbosity > 2:
@@ -81,7 +81,7 @@ def hosvd(
     # Main Loop
     factor_matrices = [np.empty(1)] * d
     # Copy input tensor; it shrinks every step in the sequential case
-    Y = ttb.tensor.from_tensor_type(tensor)
+    Y = ttb.tensor.from_tensor_type(input_tensor)
 
     for k in dimorder:
         # Compute Gram matrix
@@ -123,7 +123,7 @@ def hosvd(
     result = ttb.ttensor.from_data(G, factor_matrices)
 
     if verbosity > 0:
-        diffnormsqr = ((tensor - result.full()) ** 2).collapse()
+        diffnormsqr = ((input_tensor - result.full()) ** 2).collapse()
         relnorm = np.sqrt(diffnormsqr / normxsqr)
         print(f" Size of core: {G.shape}")
         if relnorm <= tol:
diff --git a/pyttb/tucker_als.py b/pyttb/tucker_als.py
index 3cd7117e..02636875 100644
--- a/pyttb/tucker_als.py
+++ b/pyttb/tucker_als.py
@@ -6,14 +6,20 @@
 
 
 def tucker_als(
-    tensor, rank, stoptol=1e-4, maxiters=1000, dimorder=None, init="random", printitn=1
+    input_tensor,
+    rank,
+    stoptol=1e-4,
+    maxiters=1000,
+    dimorder=None,
+    init="random",
+    printitn=1,
 ):
     """
     Compute Tucker decomposition with alternating least squares
 
     Parameters
     ----------
-    tensor: :class:`pyttb.tensor`
+    input_tensor: :class:`pyttb.tensor`
     rank: int, list[int]
         Rank of the decomposition(s)
     stoptol: float
@@ -48,8 +54,8 @@ def tucker_als(
         * `fit`: value of the fitness function (fraction of tensor data explained by the model)
 
     """
-    N = tensor.ndims
-    normX = tensor.norm()
+    N = input_tensor.ndims
+    normX = input_tensor.norm()
 
     # TODO: These argument checks look common with CP-ALS; factor out
     if not isinstance(stoptol, Real):
@@ -86,7 +92,7 @@ def tucker_als(
                 f"Init needs to be of length tensor.ndim (which was {N}) but only got length {len(init)}."
             )
         for n in dimorder[1::]:
-            correct_shape = (tensor.shape[n], rank[n])
+            correct_shape = (input_tensor.shape[n], rank[n])
             if Uinit[n].shape != correct_shape:
                 raise ValueError(
                     f"Init factor {n} had incorrect shape. Expected {correct_shape} but got {Uinit[n].shape}"
                 )
@@ -97,7 +103,7 @@ def tucker_als(
         # first index in dimorder because that will be solved for in the first
        # inner iteration.
         for n in range(1, N):
-            Uinit[n] = np.random.uniform(0, 1, (tensor.shape[n], rank[n]))
+            Uinit[n] = np.random.uniform(0, 1, (input_tensor.shape[n], rank[n]))
     elif isinstance(init, str) and init.lower() in ("nvecs", "eigs"):
         # Compute an orthonormal basis for the dominant
         # Rn-dimensional left singular subspace of
@@ -105,7 +111,7 @@ def tucker_als(
         Uinit = [None] * N
         for n in dimorder[1::]:
             print(f" Computing {rank[n]} leading e-vector for factor {n}.\n")
-            Uinit[n] = tensor.nvecs(n, rank[n])
+            Uinit[n] = input_tensor.nvecs(n, rank[n])
     else:
         raise ValueError(
             f"The selected initialization method is not supported. Provided: {init}"
         )
@@ -124,7 +130,7 @@ def tucker_als(
 
         # Iterate over all N modes of the tensor
         for n in dimorder:
-            Utilde = tensor.ttm(U, exclude_dims=n, transpose=True)
+            Utilde = input_tensor.ttm(U, exclude_dims=n, transpose=True)
             # Maximize norm(Utilde x_n W') wrt W and
             # maintain orthonormality of W
             U[n] = Utilde.nvecs(n, rank[n])
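
Usage note (not part of the patch): a minimal sketch of how the renamed keyword argument reads after this change. The `input_tensor` parameter name, the solver signatures, and cp_apr's nonnegativity check are taken from the diff above; `ttb.tensor.from_data` is assumed from the same pre-1.0 constructor style used elsewhere in this patch (`ktensor.from_factor_matrices`, `ttensor.from_data`), cp_als's signature rename is inferred from its docstring hunk, and the data values are illustrative only.

    import numpy as np
    import pyttb as ttb

    # Dense, nonnegative 3-way data; cp_apr asserts nonnegativity for its
    # Poisson-based factorization.
    X = ttb.tensor.from_data(np.random.uniform(0, 1, (4, 5, 6)))

    # The leading parameter is now `input_tensor` in all four entry points,
    # so keyword-style calls look like:
    ttb.cp_als(input_tensor=X, rank=2)
    ttb.cp_apr(input_tensor=X, rank=2, algorithm="mu")
    ttb.hosvd(input_tensor=X, tol=1e-4)
    ttb.tucker_als(input_tensor=X, rank=[2, 2, 2])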