Skip to content

Updated all tensor-named parameters to input_tensor, including in… #120

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pyttb/cp_als.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def cp_als(

Parameters
----------
tensor: :class:`pyttb.tensor` or :class:`pyttb.sptensor` or :class:`pyttb.ktensor`
input_tensor: :class:`pyttb.tensor` or :class:`pyttb.sptensor` or :class:`pyttb.ktensor`
rank: int
Rank of the decomposition
stoptol: float
Expand Down
92 changes: 47 additions & 45 deletions pyttb/cp_apr.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@


def cp_apr(
tensor,
input_tensor,
rank,
algorithm="mu",
stoptol=1e-4,
Expand All @@ -38,7 +38,7 @@ def cp_apr(

Parameters
----------
tensor: :class:`pyttb.tensor` or :class:`pyttb.sptensor`
input_tensor: :class:`pyttb.tensor` or :class:`pyttb.sptensor`
rank: int
Rank of the decomposition
algorithm: str
Expand Down Expand Up @@ -85,12 +85,12 @@ def cp_apr(

"""
# Extract the number of modes in tensor X
N = tensor.ndims
N = input_tensor.ndims

assert rank > 0, "Number of components requested must be positive"

# Check that the data is non-negative.
tmp = tensor < 0.0
tmp = input_tensor < 0.0
assert (
tmp.nnz == 0
), "Data tensor must be nonnegative for Poisson-based factorization"
Expand All @@ -103,7 +103,7 @@ def cp_apr(
init.ncomponents == rank
), "Initial guess does not have the right number of components"
for n in range(N):
if init.shape[n] != tensor.shape[n]:
if init.shape[n] != input_tensor.shape[n]:
assert False, "Mode {} of the initial guess is the wrong size".format(n)
if np.min(init.factor_matrices[n]) < 0.0:
assert False, "Initial guess has negative element in mode {}".format(n)
Expand All @@ -113,13 +113,15 @@ def cp_apr(
elif init.lower() == "random":
factor_matrices = []
for n in range(N):
factor_matrices.append(np.random.uniform(0, 1, (tensor.shape[n], rank)))
factor_matrices.append(
np.random.uniform(0, 1, (input_tensor.shape[n], rank))
)
init = ttb.ktensor.from_factor_matrices(factor_matrices)

# Call solver based on the choice of algorithm parameter, passing all the other input parameters
if algorithm.lower() == "mu":
M, output = tt_cp_apr_mu(
tensor,
input_tensor,
rank,
init,
stoptol,
Expand All @@ -135,7 +137,7 @@ def cp_apr(
output["algorithm"] = "mu"
elif algorithm.lower() == "pdnr":
M, output = tt_cp_apr_pdnr(
tensor,
input_tensor,
rank,
init,
stoptol,
Expand All @@ -153,7 +155,7 @@ def cp_apr(
output["algorithm"] = "pdnr"
elif algorithm.lower() == "pqnr":
M, output = tt_cp_apr_pqnr(
tensor,
input_tensor,
rank,
init,
stoptol,
Expand All @@ -175,7 +177,7 @@ def cp_apr(


def tt_cp_apr_mu(
tensor,
input_tensor,
rank,
init,
stoptol,
Expand All @@ -193,7 +195,7 @@ def tt_cp_apr_mu(

Parameters
----------
tensor: :class:`pyttb.tensor` or :class:`pyttb.sptensor`
input_tensor: :class:`pyttb.tensor` or :class:`pyttb.sptensor`
rank: int
Rank of the decomposition
init: :class:`pyttb.ktensor`
Expand Down Expand Up @@ -227,7 +229,7 @@ def tt_cp_apr_mu(
URL: http://arxiv.org/abs/1112.2414. Submitted for publication.

"""
N = tensor.ndims
N = input_tensor.ndims

# TODO I vote no duplicate error checking, copy error checking from cp_apr for initial guess here if disagree

Expand Down Expand Up @@ -276,15 +278,15 @@ def tt_cp_apr_mu(

# Calculate product of all matrices but the n-th
# Sparse case only calculates entries corresponding to nonzeros in X
Pi = calculatePi(tensor, M, rank, n, N)
Pi = calculatePi(input_tensor, M, rank, n, N)

# Do the multiplicative updates
for i in range(maxinneriters):
# Count the inner iterations
nInnerIters[iter] += 1

# Calculate matrix for multiplicative update
Phi[n] = calculatePhi(tensor, M, rank, n, Pi, epsDivZero)
Phi[n] = calculatePhi(input_tensor, M, rank, n, Pi, epsDivZero)

# Check for convergence
kktModeViolations[n] = np.max(
Expand Down Expand Up @@ -335,12 +337,12 @@ def tt_cp_apr_mu(
# Clean up final result
M.normalize(sort=True, normtype=1)

obj = tt_loglikelihood(tensor, M)
obj = tt_loglikelihood(input_tensor, M)

if printitn > 0:
normTensor = tensor.norm()
normTensor = input_tensor.norm()
normresidual = np.sqrt(
normTensor**2 + M.norm() ** 2 - 2 * tensor.innerprod(M)
normTensor**2 + M.norm() ** 2 - 2 * input_tensor.innerprod(M)
)
fit = 1 - (normresidual / normTensor) # fraction explained by model
print("===========================================\n")
Expand Down Expand Up @@ -374,7 +376,7 @@ def tt_cp_apr_mu(


def tt_cp_apr_pdnr(
tensor,
input_tensor,
rank,
init,
stoptol,
Expand All @@ -399,7 +401,7 @@ def tt_cp_apr_pdnr(
Parameters
----------
# TODO it looks like this method of defining union helps the type hinting better than or
tensor: Union[:class:`pyttb.tensor`,:class:`pyttb.sptensor`]
input_tensor: Union[:class:`pyttb.tensor`,:class:`pyttb.sptensor`]
rank: int
Rank of the decomposition
init: str or :class:`pyttb.ktensor`
Expand Down Expand Up @@ -440,7 +442,7 @@ def tt_cp_apr_pdnr(

"""
# Extract the number of modes in tensor X
N = tensor.ndims
N = input_tensor.ndims

# If the initial guess has any rows of all zero elements, then modify so the row subproblem is not taking log(0).
# Values will be restored to zero later if the unfolded X for the row has no zeros.
Expand All @@ -456,7 +458,7 @@ def tt_cp_apr_pdnr(
M.normalize(normtype=1)

# Sparse tensor flag affects how Pi and Phi are computed.
if isinstance(tensor, ttb.sptensor):
if isinstance(input_tensor, ttb.sptensor):
isSparse = True
else:
isSparse = False
Expand Down Expand Up @@ -487,7 +489,7 @@ def tt_cp_apr_pdnr(
num_rows = M[n].shape[0]
row_indices = []
for jj in range(num_rows):
row_indices.append(np.where(tensor.subs[:, n] == jj)[0])
row_indices.append(np.where(input_tensor.subs[:, n] == jj)[0])
sparseIx.append(row_indices)

if printitn > 0:
Expand All @@ -511,8 +513,8 @@ def tt_cp_apr_pdnr(
# calculate khatri-rao product of all matrices but the n-th
if isSparse == False:
# Data is not a sparse tensor.
Pi = ttb.tt_calcpi_prowsubprob(tensor, M, rank, n, N, isSparse)
X_mat = ttb.tt_to_dense_matrix(tensor, n)
Pi = ttb.tt_calcpi_prowsubprob(input_tensor, M, rank, n, N, isSparse)
X_mat = ttb.tt_to_dense_matrix(input_tensor, n)

num_rows = M[n].shape[0]
isRowNOTconverged = np.zeros((num_rows,))
Expand All @@ -526,7 +528,7 @@ def tt_cp_apr_pdnr(
if isSparse:
# Data is a sparse tensor
if not precompinds:
sparse_indices = np.where(tensor.subs[:, n] == jj)[0]
sparse_indices = np.where(input_tensor.subs[:, n] == jj)[0]
else:
sparse_indices = sparseIx[n][jj]

Expand All @@ -535,11 +537,11 @@ def tt_cp_apr_pdnr(
M.factor_matrices[n][jj, :] = 0
continue

x_row = tensor.vals[sparse_indices]
x_row = input_tensor.vals[sparse_indices]

# Calculate just the columns of Pi needed for this row.
Pi = ttb.tt_calcpi_prowsubprob(
tensor, M, rank, n, N, isSparse, sparse_indices
input_tensor, M, rank, n, N, isSparse, sparse_indices
)

else:
Expand Down Expand Up @@ -663,7 +665,7 @@ def tt_cp_apr_pdnr(

# Print outer iteration status.
if printitn > 0 and np.mod(iter, printitn) == 0:
fnVals[iter] = -tt_loglikelihood(tensor, M)
fnVals[iter] = -tt_loglikelihood(input_tensor, M)
print(
"{}. Ttl Inner Its: {}, KKT viol = {}, obj = {}, nz: {}\n".format(
iter,
Expand All @@ -690,12 +692,12 @@ def tt_cp_apr_pdnr(
# Clean up final result
M.normalize(sort=True, normtype=1)

obj = tt_loglikelihood(tensor, M)
obj = tt_loglikelihood(input_tensor, M)

if printitn > 0:
normTensor = tensor.norm()
normTensor = input_tensor.norm()
normresidual = np.sqrt(
normTensor**2 + M.norm() ** 2 - 2 * tensor.innerprod(M)
normTensor**2 + M.norm() ** 2 - 2 * input_tensor.innerprod(M)
)
fit = 1 - (normresidual / normTensor) # fraction explained by model
print("===========================================\n")
Expand Down Expand Up @@ -732,7 +734,7 @@ def tt_cp_apr_pdnr(


def tt_cp_apr_pqnr(
tensor,
input_tensor,
rank,
init,
stoptol,
Expand Down Expand Up @@ -769,7 +771,7 @@ def tt_cp_apr_pqnr(

Parameters
----------
tensor: Union[:class:`pyttb.tensor`,:class:`pyttb.sptensor`]
input_tensor: Union[:class:`pyttb.tensor`,:class:`pyttb.sptensor`]
rank: int
Rank of the decomposition
init: str or :class:`pyttb.ktensor`
Expand Down Expand Up @@ -808,7 +810,7 @@ def tt_cp_apr_pqnr(
"""
# TODO first ~100 lines are identical to PDNR, consider abstracting just the algorithm portion
# Extract the number of modes in data tensor
N = tensor.ndims
N = input_tensor.ndims

# If the initial guess has any rows of all zero elements, then modify so the row subproblem is not taking log(0).
# Values will be restored to zero later if the unfolded X for the row has no zeros.
Expand All @@ -824,7 +826,7 @@ def tt_cp_apr_pqnr(
M.normalize(normtype=1)

# Sparse tensor flag affects how Pi and Phi are computed.
if isinstance(tensor, ttb.sptensor):
if isinstance(input_tensor, ttb.sptensor):
isSparse = True
else:
isSparse = False
Expand Down Expand Up @@ -855,7 +857,7 @@ def tt_cp_apr_pqnr(
num_rows = M[n].shape[0]
row_indices = []
for jj in range(num_rows):
row_indices.append(np.where(tensor.subs[:, n] == jj)[0])
row_indices.append(np.where(input_tensor.subs[:, n] == jj)[0])
sparseIx.append(row_indices)

if printitn > 0:
Expand All @@ -875,8 +877,8 @@ def tt_cp_apr_pqnr(
# calculate khatri-rao product of all matrices but the n-th
if isSparse == False:
# Data is not a sparse tensor.
Pi = ttb.tt_calcpi_prowsubprob(tensor, M, rank, n, N, isSparse)
X_mat = ttb.tt_to_dense_matrix(tensor, n)
Pi = ttb.tt_calcpi_prowsubprob(input_tensor, M, rank, n, N, isSparse)
X_mat = ttb.tt_to_dense_matrix(input_tensor, n)

num_rows = M[n].shape[0]
isRowNOTconverged = np.zeros((num_rows,))
Expand All @@ -887,7 +889,7 @@ def tt_cp_apr_pqnr(
if isSparse:
# Data is a sparse tensor
if not precompinds:
sparse_indices = np.where(tensor.subs[:, n] == jj)[0]
sparse_indices = np.where(input_tensor.subs[:, n] == jj)[0]
else:
sparse_indices = sparseIx[n][jj]

Expand All @@ -896,11 +898,11 @@ def tt_cp_apr_pqnr(
M.factor_matrices[n][jj, :] = 0
continue

x_row = tensor.vals[sparse_indices]
x_row = input_tensor.vals[sparse_indices]

# Calculate just the columns of Pi needed for this row.
Pi = ttb.tt_calcpi_prowsubprob(
tensor, M, rank, n, N, isSparse, sparse_indices
input_tensor, M, rank, n, N, isSparse, sparse_indices
)

else:
Expand Down Expand Up @@ -1071,7 +1073,7 @@ def tt_cp_apr_pqnr(

# Print outer iteration status.
if printitn > 0 and np.mod(iter, printitn) == 0:
fnVals[iter] = -tt_loglikelihood(tensor, M)
fnVals[iter] = -tt_loglikelihood(input_tensor, M)
print(
"{}. Ttl Inner Its: {}, KKT viol = {}, obj = {}, nz: {}\n".format(
iter, nInnerIters[iter], kktViolations[iter], fnVals[iter], num_zero
Expand All @@ -1092,12 +1094,12 @@ def tt_cp_apr_pqnr(
# Clean up final result
M.normalize(sort=True, normtype=1)

obj = tt_loglikelihood(tensor, M)
obj = tt_loglikelihood(input_tensor, M)

if printitn > 0:
normTensor = tensor.norm()
normTensor = input_tensor.norm()
normresidual = np.sqrt(
normTensor**2 + M.norm() ** 2 - 2 * tensor.innerprod(M)
normTensor**2 + M.norm() ** 2 - 2 * input_tensor.innerprod(M)
)
fit = 1 - (normresidual / normTensor) # fraction explained by model
print("===========================================\n")
Expand Down
Loading