diff --git a/pyttb/sptensor.py b/pyttb/sptensor.py
index 0a7f4b2a..38e586d0 100644
--- a/pyttb/sptensor.py
+++ b/pyttb/sptensor.py
@@ -1712,7 +1712,7 @@ def __mul__(self, other):
         -------
         :class:`pyttb.sptensor`
         """
-        if isinstance(other, (float,int)):
+        if isinstance(other, (float, int, np.number)):
             return ttb.sptensor.from_data(self.subs, self.vals*other, self.shape)

         if isinstance(other, (ttb.sptensor,ttb.tensor,ttb.ktensor)) and self.shape != other.shape:
@@ -1754,7 +1754,7 @@ def __rmul__(self, other):
         -------
         :class:`pyttb.sptensor`
         """
-        if isinstance(other, (float,int)):
+        if isinstance(other, (float, int, np.number)):
             return self.__mul__(other)
         else:
             assert False, "This object cannot be multiplied by sptensor"
@@ -2173,15 +2173,14 @@ def __repr__(self):  # pragma: no cover

     __str__ = __repr__

-    def ttm(self, matrices, mode, dims=None, transpose=False):
+    def ttm(self, matrices, dims=None, transpose=False):
         """
         Sparse tensor times matrix.

         Parameters
         ----------
         matrices: A matrix or list of matrices
-        mode:
-        dims:
+        dims: :class:`Numpy.ndarray`, int
         transpose: Transpose matrices to be multiplied

         Returns
         -------
@@ -2190,10 +2189,15 @@ def ttm(self, matrices, mode, dims=None, transpose=False):
         """
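+        # `dims` may arrive as None, an int, a list, or a numpy array;
+        # normalize everything to an array up front so the single-matrix
+        # case below only has to handle one form.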
         if dims is None:
             dims = np.arange(self.ndims)
+        elif isinstance(dims, list):
+            dims = np.array(dims)
+        elif np.isscalar(dims):
+            dims = np.array([dims])
+
         # Handle list of matrices
         if isinstance(matrices, list):
             # Check dimensions are valid
-            [dims, vidx] = tt_dimscheck(mode, self.ndims, len(matrices))
+            [dims, vidx] = tt_dimscheck(dims, self.ndims, len(matrices))
             # Calculate individual products
             Y = self.ttm(matrices[vidx[0]], dims[0], transpose=transpose)
             for i in range(1, dims.size):
@@ -2208,22 +2212,23 @@ def ttm(self, matrices, mode, dims=None, transpose=False):
         if transpose:
             matrices = matrices.transpose()

-        # Check mode
-        if not np.isscalar(mode) or mode < 0 or mode > self.ndims-1:
-            assert False, "Mode must be in [0, ndims)"
+        # Ensure this is the terminal single dimension case
+        if not (dims.size == 1 and np.isin(dims, np.arange(self.ndims))):
+            assert False, "dims must contain values in [0,self.ndims)"
+        dims = dims[0]

         # Compute the product

         # Check that sizes match
-        if self.shape[mode] != matrices.shape[1]:
+        if self.shape[dims] != matrices.shape[1]:
             assert False, "Matrix shape doesn't match tensor shape"

         # Compute the new size
         siz = np.array(self.shape)
-        siz[mode] = matrices.shape[0]
+        siz[dims] = matrices.shape[0]

         # Compute self[mode]'
-        Xnt = ttb.tt_to_sparse_matrix(self, mode, True)
+        Xnt = ttb.tt_to_sparse_matrix(self, dims, True)

         # Reshape puts the reshaped things after the unchanged modes, transpose then puts it in front
         idx = 0
@@ -2231,10 +2236,10 @@ def ttm(self, matrices, mode, dims=None, transpose=False):
         # Convert to sparse matrix and do multiplication; generally result is sparse
         Z = Xnt.dot(matrices.transpose())

-        # Rearrange back into sparse tensor of original shape
-        Ynt = ttb.tt_from_sparse_matrix(Z, self.shape, mode, idx)
+        # Rearrange back into sparse tensor of correct shape
+        Ynt = ttb.tt_from_sparse_matrix(Z, siz, dims, idx)

-        if Z.nnz <= 0.5 * np.prod(siz):
+        if not isinstance(Z, np.ndarray) and Z.nnz <= 0.5 * np.prod(siz):
             return Ynt
         else:
             # TODO evaluate performance loss by casting into sptensor then tensor. I assume minimal since we are already
diff --git a/pyttb/tensor.py b/pyttb/tensor.py
index c6cd8d5a..dbb39a8d 100644
--- a/pyttb/tensor.py
+++ b/pyttb/tensor.py
@@ -921,7 +921,7 @@ def ttm(self, matrix, dims=None, transpose=False):
             assert False, "matrix must be of type numpy.ndarray"

         if not (dims.size == 1 and np.isin(dims, np.arange(self.ndims))):
-            assert False, "dims must contain values in [0,self.dims]"
+            assert False, "dims must contain values in [0,self.ndims)"

         # old version (ver=0)
         shape = np.array(self.shape)
@@ -1271,7 +1271,6 @@ def __getitem__(self, item):
             kpdims = []  # dimensions to keep
             rmdims = []  # dimensions to remove

-            # Determine the new size and what dimensions to keep
             # Determine the new size and what dimensions to keep
             for i in range(0, len(region)):
                 if isinstance(region[i], slice):
@@ -1289,19 +1288,11 @@ def __getitem__(self, item):

             # If the size is zero, then the result is returned as a scalar
             # otherwise, we convert the result to a tensor
-
             if newsiz.size == 0:
                 a = newdata
             else:
-                if rmdims.size == 0:
-                    a = ttb.tensor.from_data(newdata)
-                else:
-                    # If extracted data is a vector then no need to tranpose it
-                    if len(newdata.shape) == 1:
-                        a = ttb.tensor.from_data(newdata)
-                    else:
-                        a = ttb.tensor.from_data(np.transpose(newdata, np.concatenate((kpdims, rmdims))))
-            return ttb.tt_subsubsref(a, item)
+                a = ttb.tensor.from_data(newdata)
+            return a

         # *** CASE 2a: Subscript indexing ***
         if len(item) > 1 and isinstance(item[-1], str) and item[-1] == 'extract':
diff --git a/pyttb/ttensor.py b/pyttb/ttensor.py
index 2aa461bb..5a3f5421 100644
--- a/pyttb/ttensor.py
+++ b/pyttb/ttensor.py
@@ -2,9 +2,20 @@
 # LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the
 # U.S. Government retains certain rights in this software.

-import pyttb as ttb
-from .pyttb_utils import *
+from pyttb import (
+    ktensor,
+    tenmat,
+    tensor,
+    sptensor,
+    sptenmat,
+)
+from pyttb import pyttb_utils as ttb_utils
 import numpy as np
+import scipy.linalg
+import scipy.sparse.linalg
+import textwrap
+import warnings
+
+ALT_CORE_ERROR = "TTensor doesn't support non-tensor cores yet"

 class ttensor(object):
     """
@@ -12,5 +23,550 @@ class ttensor(object):
     """

-    def __init__(self, *args):  # pragma:no cover
-        assert False, "TTENSOR class not yet implemented"
+    def __init__(self):
+        """
+        Create an empty decomposed tucker tensor
+
+        Returns
+        -------
+        :class:`pyttb.ttensor`
+        """
+        # Empty constructor
+        self.core = tensor()
+        self.u = []
+
+    @classmethod
+    def from_data(cls, core, factors):
+        """
+        Construct a ttensor from a fully defined core tensor and factor matrices.
+
+        Parameters
+        ----------
+        core: :class:`ttb.tensor`
+        factors: :class:`list(numpy.ndarray)`
+
+        Returns
+        -------
+        :class:`pyttb.ttensor`
+
+        Examples
+        --------
+        Import required modules:
+
+        >>> import pyttb as ttb
+        >>> import numpy as np
+
+        Set up input data and create a ttensor with an explicit data description:
+
+        >>> core_values = np.ones((2, 2, 2))
+        >>> core = ttb.tensor.from_data(core_values)
+        >>> factors = [np.ones((1, 2))] * len(core_values.shape)
+        >>> K0 = ttb.ttensor.from_data(core, factors)
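+
+        Each factor matrix has a single row, so this decomposition
+        reconstructs to a 1 x 1 x 1 tensor:
+
+        >>> K0.shape
+        (1, 1, 1)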
+        """
+        ttensorInstance = ttensor()
+        if isinstance(core, tensor):
+            ttensorInstance.core = tensor.from_data(core.data, core.shape)
+            ttensorInstance.u = factors.copy()
+        else:
+            # TODO support any tensor type with supported ops
+            raise ValueError("TTENSOR doesn't yet support generic cores, only tensor")
+        ttensorInstance._validate_ttensor()
+        return ttensorInstance
+
+    @classmethod
+    def from_tensor_type(cls, source):
+        """
+        Converts other tensor types into a ttensor
+
+        Parameters
+        ----------
+        source: :class:`pyttb.ttensor`
+
+        Returns
+        -------
+        :class:`pyttb.ttensor`
+        """
+        # Copy constructor
+        if isinstance(source, ttensor):
+            return cls.from_data(source.core, source.u)
+        raise ValueError(f"Initialization from {type(source)} is not supported")
+
+    def _validate_ttensor(self):
+        """
+        Verifies the validity of constructed ttensor
+
+        Returns
+        -------
+        """
+        # Confirm all factors are matrices
+        for factor_idx, factor in enumerate(self.u):
+            if not isinstance(factor, np.ndarray):
+                raise ValueError(f"Factor matrices must be numpy arrays but factor {factor_idx} was {type(factor)}")
+            if len(factor.shape) != 2:
+                raise ValueError(f"Factor matrix {factor_idx} has shape {factor.shape} and is not a matrix!")
+
+        # Verify size consistency
+        core_order = len(self.core.shape)
+        num_matrices = len(self.u)
+        if core_order != num_matrices:
+            raise ValueError(f"CORE has order {core_order} but there are {num_matrices} factor matrices")
+        for factor_idx, factor in enumerate(self.u):
+            if factor.shape[-1] != self.core.shape[factor_idx]:
+                raise ValueError(f"Factor matrix {factor_idx} does not have {self.core.shape[factor_idx]} columns")
+
+    @property
+    def shape(self):
+        """
+        Shape of the tensor this decomposition represents.
+
+        Returns
+        -------
+        tuple(int)
+        """
+        return tuple(factor.shape[0] for factor in self.u)
+
+    def __repr__(self):  # pragma: no cover
+        """
+        String representation of a tucker tensor.
+
+        Returns
+        -------
+        str
+            Contains the core and factor matrices as strings on different lines.
+        """
+        display_string = (
+            f"Tensor of shape: {self.shape}\n"
+            f"\tCore is a "
+        )
+        display_string += textwrap.indent(str(self.core), '\t')
+
+        for factor_idx, factor in enumerate(self.u):
+            display_string += f"\tU[{factor_idx}] = \n"
+            display_string += textwrap.indent(str(factor), '\t\t')
+            display_string += "\n"
+        return display_string
+
+    __str__ = __repr__
+
+    def full(self):
+        """
+        Convert a ttensor to a (dense) tensor.
+
+        Returns
+        -------
+        :class:`pyttb.tensor`
+        """
+        recomposed_tensor = self.core.ttm(self.u)
+
+        # There is a small chance the result could be sparse, so ensure we cast it to dense.
+        if not isinstance(recomposed_tensor, tensor):
+            raise ValueError(ALT_CORE_ERROR)
+        return recomposed_tensor
+
+    def double(self):
+        """
+        Convert ttensor to an array of doubles
+
+        Returns
+        -------
+        :class:`numpy.ndarray`
+            copy of tensor data
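+
+        Examples
+        --------
+        Illustrative only, reusing the all-ones ttensor from the ``from_data``
+        example; every entry of the reconstruction is the sum over the eight
+        core entries:
+
+        >>> import numpy as np
+        >>> import pyttb as ttb
+        >>> core = ttb.tensor.from_data(np.ones((2, 2, 2)))
+        >>> T = ttb.ttensor.from_data(core, [np.ones((1, 2))] * 3)
+        >>> T.double()
+        array([[[8.]]])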
+        """
+        return self.full().double()
+
+    @property
+    def ndims(self):
+        """
+        Number of dimensions of a ttensor.
+
+        Returns
+        -------
+        int
+            Number of dimensions of ttensor
+        """
+        return len(self.u)
+
+    def isequal(self, other):
+        """
+        Component equality for ttensors
+
+        Parameters
+        ----------
+        other: :class:`pyttb.ttensor`
+
+        Returns
+        -------
+        bool: True if the ttensor decompositions are identical, false otherwise
+        """
+        if not isinstance(other, ttensor):
+            return False
+        if self.ndims != other.ndims:
+            return False
+        return self.core.isequal(other.core) and all(
+            np.array_equal(this_factor, other_factor) for this_factor, other_factor in zip(self.u, other.u)
+        )
+
+    def __pos__(self):
+        """
+        Unary plus (+) for ttensors. Does nothing.
+
+        Returns
+        -------
+        :class:`pyttb.ttensor`, copy of tensor
+        """
+        return ttensor.from_tensor_type(self)
+
+    def __neg__(self):
+        """
+        Unary minus (-) for ttensors
+
+        Returns
+        -------
+        :class:`pyttb.ttensor`, copy of tensor
+        """
+        return ttensor.from_data(-self.core, self.u)
+
+    def innerprod(self, other):
+        """
+        Efficient inner product with a ttensor
+
+        Parameters
+        ----------
+        other: :class:`pyttb.tensor`, :class:`pyttb.sptensor`, :class:`pyttb.ktensor`,
+            :class:`pyttb.ttensor`
+
+        Returns
+        -------
+        float
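+
+        Examples
+        --------
+        A sketch with the all-ones ttensor (see ``from_data``); the single
+        reconstructed entry is 8, so the inner product with itself is 64:
+
+        >>> import numpy as np
+        >>> import pyttb as ttb
+        >>> core = ttb.tensor.from_data(np.ones((2, 2, 2)))
+        >>> T = ttb.ttensor.from_data(core, [np.ones((1, 2))] * 3)
+        >>> float(T.innerprod(T))
+        64.0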
+        """
+        if isinstance(other, ttensor):
+            if self.shape != other.shape:
+                raise ValueError(
+                    "ttensors must have same shape to perform an innerproduct, but this ttensor "
+                    f"has shape {self.shape} and the other has {other.shape}"
+                )
+            if np.prod(self.core.shape) > np.prod(other.core.shape):
+                # Reverse arguments so the ttensor with the smaller core comes first.
+                return other.innerprod(self)
+            W = []
+            for (this_factor, other_factor) in zip(self.u, other.u):
+                W.append(this_factor.transpose().dot(other_factor))
+            J = other.core.ttm(W)
+            return self.core.innerprod(J)
+        elif isinstance(other, (tensor, sptensor)):
+            if self.shape != other.shape:
+                raise ValueError(
+                    "ttensors must have same shape to perform an innerproduct, but this ttensor "
+                    f"has shape {self.shape} and the other has {other.shape}"
+                )
+            if np.prod(self.shape) < np.prod(self.core.shape):
+                Z = self.full()
+                return Z.innerprod(other)
+            Z = other.ttm(self.u, transpose=True)
+            return Z.innerprod(self.core)
+        elif isinstance(other, ktensor):
+            # Call ktensor implementation
+            # TODO needs ttensor ttv
+            return other.innerprod(self)
+        else:
+            raise ValueError(f"Inner product between ttensor and {type(other)} is not supported")
+
+    def __mul__(self, other):
+        """
+        Element wise multiplication (*) for ttensors (only scalars supported)
+
+        Parameters
+        ----------
+        other: float, int
+
+        Returns
+        -------
+        :class:`pyttb.ttensor`
+        """
+        if isinstance(other, (float, int, np.number)):
+            return ttensor.from_data(self.core * other, self.u)
+        raise ValueError(
+            "This object cannot be multiplied by ttensor. Convert to full if trying to "
+            "multiply ttensor by another tensor."
+        )
+
+    def __rmul__(self, other):
+        """
+        Element wise right multiplication (*) for ttensors (only scalars supported)
+
+        Parameters
+        ----------
+        other: float, int
+
+        Returns
+        -------
+        :class:`pyttb.ttensor`
+        """
+        if isinstance(other, (float, int, np.number)):
+            return self.__mul__(other)
+        raise ValueError("This object cannot be multiplied by ttensor")
+
+    def ttv(self, vector, dims=None):
+        """
+        TTensor times vector
+
+        Parameters
+        ----------
+        vector: :class:`Numpy.ndarray`, list[:class:`Numpy.ndarray`]
+        dims: :class:`Numpy.ndarray`, int
+
+        Returns
+        -------
+        float or :class:`pyttb.ttensor`
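+
+        Examples
+        --------
+        Hypothetical usage, contracting every mode of the all-ones ttensor
+        with length-one vectors of value 2 (each mode then contributes a
+        factor of 2, giving 8 * 2**3):
+
+        >>> import numpy as np
+        >>> import pyttb as ttb
+        >>> core = ttb.tensor.from_data(np.ones((2, 2, 2)))
+        >>> T = ttb.ttensor.from_data(core, [np.ones((1, 2))] * 3)
+        >>> float(T.ttv([np.array([2.0])] * 3))
+        64.0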
+        """
+        if dims is None:
+            dims = np.array([])
+        # TODO make helper function to check scalar since re-used many places
+        elif isinstance(dims, (float, int)):
+            dims = np.array([dims])
+
+        # Check that vector is a list of vectors; if not, place the single vector as an element in a list
+        if isinstance(vector, list):
+            return self.ttv(np.array(vector), dims)
+        if len(vector.shape) == 1 and isinstance(vector[0], (int, float, np.int_, np.float_)):
+            return self.ttv(np.array([vector]), dims)
+
+        # Get sorted dims and index for multiplicands
+        dims, vidx = ttb_utils.tt_dimscheck(dims, self.ndims, vector.shape[0])
+
+        # Check that each multiplicand is the right size.
+        for i in range(dims.size):
+            if vector[vidx[i]].shape != (self.shape[dims[i]],):
+                raise ValueError("Multiplicand is wrong size")
+
+        # Get remaining dimensions when we're done
+        remdims = np.setdiff1d(np.arange(0, self.ndims), dims)
+
+        # Create W to multiply with core; only the selected dims are populated.
+        # W needs one slot per mode (not per selected dim) since it is indexed
+        # by mode below.
+        W = [None] * self.ndims
+        for i in range(dims.size):
+            dim_idx = dims[i]
+            W[dim_idx] = self.u[dim_idx].transpose().dot(vector[vidx[i]])
+
+        # Create new core
+        newcore = self.core.ttv(W, dims)
+
+        # Create final result
+        if remdims.size == 0:
+            return newcore
+        else:
+            return ttensor.from_data(newcore, [self.u[dim] for dim in remdims])
+
+    def mttkrp(self, U, n):
+        """
+        Matricized tensor times Khatri-Rao product for ttensors.
+
+        Parameters
+        ----------
+        U: array of matrices or ktensor
+        n: multiplies by all modes except n
+
+        Returns
+        -------
+        :class:`numpy.ndarray`
+        """
+        # NOTE: MATLAB version calculates an unused R here
+
+        W = [None] * self.ndims
+        for i in range(0, self.ndims):
+            if i == n:
+                continue
+            W[i] = self.u[i].transpose().dot(U[i])
+
+        Y = self.core.mttkrp(W, n)
+
+        # Find each column of answer by multiplying by weights
+        return self.u[n].dot(Y)
+
+    def norm(self):
+        """
+        Compute the norm of a ttensor.
+
+        Returns
+        -------
+        norm: float, Frobenius norm of the tensor
+        """
+        if np.prod(self.shape) > np.prod(self.core.shape):
+            V = []
+            for factor in self.u:
+                V.append(factor.transpose().dot(factor))
+            Y = self.core.ttm(V)
+            tmp = Y.innerprod(self.core)
+            return np.sqrt(tmp)
+        else:
+            return self.full().norm()
+
+    def permute(self, order):
+        """
+        Permute dimensions for a ttensor
+
+        Parameters
+        ----------
+        order: :class:`Numpy.ndarray`
+
+        Returns
+        -------
+        :class:`pyttb.ttensor`
+        """
+        if not np.array_equal(np.arange(0, self.ndims), np.sort(order)):
+            raise ValueError("Invalid permutation")
+        new_core = self.core.permute(order)
+        new_u = [self.u[idx] for idx in order]
+        return ttensor.from_data(new_core, new_u)
+
+    def ttm(self, matrix, dims=None, transpose=False):
+        """
+        Tensor times matrix for ttensor
+
+        Parameters
+        ----------
+        matrix: :class:`Numpy.ndarray`, list[:class:`Numpy.ndarray`]
+        dims: :class:`Numpy.ndarray`, int
+        transpose: bool
+
+        Returns
+        -------
+        :class:`pyttb.ttensor`
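+
+        Examples
+        --------
+        Sketch, again assuming the all-ones ttensor; multiplying mode 0 by a
+        2 x 1 matrix only touches that factor, so the row count of mode 0
+        grows from 1 to 2:
+
+        >>> import numpy as np
+        >>> import pyttb as ttb
+        >>> core = ttb.tensor.from_data(np.ones((2, 2, 2)))
+        >>> T = ttb.ttensor.from_data(core, [np.ones((1, 2))] * 3)
+        >>> T.ttm(np.ones((2, 1)), 0).shape
+        (2, 1, 1)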
+        """
+        if dims is None:
+            dims = np.arange(self.ndims)
+        elif isinstance(dims, list):
+            dims = np.array(dims)
+        elif np.isscalar(dims):
+            dims = np.array([dims])
+
+        if not isinstance(matrix, list):
+            return self.ttm([matrix], dims, transpose)
+
+        # Check that the dimensions are valid
+        dims, vidx = ttb_utils.tt_dimscheck(dims, self.ndims, len(matrix))
+
+        # Determine correct size index
+        size_idx = int(not transpose)
+
+        # Check that each multiplicand is the right size.
+        for i in range(len(dims)):
+            if matrix[vidx[i]].shape[size_idx] != self.shape[dims[i]]:
+                raise ValueError(f"Multiplicand {i} is wrong size")
+
+        # Do the actual multiplications in the specified modes.
+        new_u = self.u.copy()
+        for i in range(len(dims)):
+            if transpose:
+                new_u[dims[i]] = matrix[vidx[i]].transpose().dot(new_u[dims[i]])
+            else:
+                new_u[dims[i]] = matrix[vidx[i]].dot(new_u[dims[i]])
+
+        return ttensor.from_data(self.core, new_u)
+
+    def reconstruct(self, samples=None, modes=None):
+        """
+        Reconstruct or partially reconstruct tensor from ttensor.
+
+        Parameters
+        ----------
+        samples: :class:`Numpy.ndarray`, list[:class:`Numpy.ndarray`]
+        modes: :class:`Numpy.ndarray`, list[:class:`Numpy.ndarray`]
+
+        Returns
+        -------
+        :class:`pyttb.tensor`
+        """
+        # Default to sampling full tensor
+        full_tensor_sampling = samples is None and modes is None
+        if full_tensor_sampling:
+            return self.full()
+
+        if modes is None:
+            modes = np.arange(self.ndims)
+        elif isinstance(modes, list):
+            modes = np.array(modes)
+        elif np.isscalar(modes):
+            modes = np.array([modes])
+
+        if np.isscalar(samples):
+            samples = [np.array([samples])]
+        elif not isinstance(samples, list):
+            samples = [samples]
+
+        unequal_lengths = len(samples) != len(modes)
+        if unequal_lengths:
+            raise ValueError(
+                "If samples and modes are provided their lengths must be equal, but "
+                f"samples had length {len(samples)} and modes {len(modes)}"
+            )
+
+        full_samples = [np.array([])] * self.ndims
+        for sample, mode in zip(samples, modes):
+            if np.isscalar(sample):
+                full_samples[mode] = np.array([sample])
+            else:
+                full_samples[mode] = sample
+
+        shape = self.shape
+        new_u = []
+        for k in range(self.ndims):
+            if len(full_samples[k]) == 0:
+                # Skip empty samples
+                new_u.append(self.u[k])
+                continue
+            elif len(full_samples[k].shape) == 2 and full_samples[k].shape[-1] == shape[k]:
+                new_u.append(full_samples[k].dot(self.u[k]))
+            else:
+                new_u.append(self.u[k][full_samples[k], :])
+
+        return ttensor.from_data(self.core, new_u).full()
+
+    def nvecs(self, n, r, flipsign=True):
+        """
+        Compute the leading mode-n vectors for a ttensor.
+
+        Parameters
+        ----------
+        n: mode for tensor matricization
+        r: number of eigenvalues
+        flipsign: Make each column's largest element positive if true
+
+        Returns
+        -------
+        :class:`numpy.ndarray`
+        """
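+        # Assemble Y = X_(n) * X_(n)' directly from the factored form instead
+        # of materializing the full tensor X (cf. MATLAB ttensor/nvecs).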
+        # Compute inner product of all factors except the n-th
+        V = []
+        for factor_idx, factor in enumerate(self.u):
+            if factor_idx == n:
+                V.append(factor)
+            else:
+                V.append(factor.transpose().dot(factor))
+        H = self.core.ttm(V)
+
+        if isinstance(H, sptensor):
+            raise NotImplementedError(ALT_CORE_ERROR)
+        else:
+            HnT = tenmat.from_tensor_type(H.full(), cdims=np.array([n])).double()
+
+        G = self.core
+
+        if isinstance(G, sptensor):
+            raise NotImplementedError(ALT_CORE_ERROR)
+        else:
+            GnT = tenmat.from_tensor_type(G.full(), cdims=np.array([n])).double()
+
+        # Compute Xn * Xn'
+        Y = HnT.transpose().dot(GnT.dot(self.u[n].transpose()))
+
+        # TODO: Lifted from tensor, consider common location
+        if r < Y.shape[0] - 1:
+            w, v = scipy.sparse.linalg.eigsh(Y, r)
+            v = v[:, (-np.abs(w)).argsort()]
+            v = v[:, :r]
+        else:
+            warnings.warn('Greater than or equal to tensor.shape[n] - 1 eigenvectors requires cast to dense to solve')
+            w, v = scipy.linalg.eigh(Y)
+            v = v[:, (-np.abs(w)).argsort()]
+            v = v[:, :r]
+
+        if flipsign:
+            idx = np.argmax(np.abs(v), axis=0)
+            for i in range(v.shape[1]):
+                if v[idx[i], i] < 0:
+                    v[:, i] *= -1
+        return v
\ No newline at end of file
diff --git a/tests/test_sptensor.py b/tests/test_sptensor.py
index d4a8f89d..1b008e23 100644
--- a/tests/test_sptensor.py
+++ b/tests/test_sptensor.py
@@ -1365,25 +1365,25 @@ def test_sptensor_ttm(sample_sptensor):
     result[:, 3, 3] = 3.5
     result = ttb.tensor.from_data(result)
     result = ttb.sptensor.from_tensor_type(result)
-    assert sptensorInstance.ttm(sparse.coo_matrix(np.ones((4, 4))), mode=0).isequal(result)
-    assert sptensorInstance.ttm(sparse.coo_matrix(np.ones((4, 4))), mode=0, transpose=True).isequal(result)
+    assert sptensorInstance.ttm(sparse.coo_matrix(np.ones((4, 4))), dims=0).isequal(result)
+    assert sptensorInstance.ttm(sparse.coo_matrix(np.ones((4, 4))), dims=0, transpose=True).isequal(result)

     # This is a multiway multiplication yielding a sparse tensor; yielding a dense tensor relies on tensor.ttm
     matrix = sparse.coo_matrix(np.eye(4))
     list_of_matrices = [matrix, matrix, matrix]
-    assert sptensorInstance.ttm(list_of_matrices, mode=np.array([0, 1, 2])).isequal(sptensorInstance)
+    assert sptensorInstance.ttm(list_of_matrices, dims=np.array([0, 1, 2])).isequal(sptensorInstance)

     with pytest.raises(AssertionError) as excinfo:
-        sptensorInstance.ttm(sparse.coo_matrix(np.ones((5, 5))), mode=0)
+        sptensorInstance.ttm(sparse.coo_matrix(np.ones((5, 5))), dims=0)
     assert "Matrix shape doesn't match tensor shape" in str(excinfo)

     with pytest.raises(AssertionError) as excinfo:
-        sptensorInstance.ttm(np.array([1, 2, 3, 4]), mode=0)
+        sptensorInstance.ttm(np.array([1, 2, 3, 4]), dims=0)
     assert "Sptensor.ttm: second argument must be a matrix" in str(excinfo)

     with pytest.raises(AssertionError) as excinfo:
-        sptensorInstance.ttm(sparse.coo_matrix(np.ones((4, 4))), mode=4)
-        assert "Mode must be in [0, ndims)" in str(excinfo)
+        sptensorInstance.ttm(sparse.coo_matrix(np.ones((4, 4))), dims=4)
+    assert "dims must contain values in [0,self.ndims)" in str(excinfo)

     sptensorInstance[0, :, :] = 1
     sptensorInstance[3, :, :] = 1
@@ -1397,17 +1397,20 @@ def test_sptensor_ttm(sample_sptensor):
     # TODO: Ensure mode mappings are consistent between matlab and numpy
     # MATLAB is opposite orientation so the mapping from matlab to numpy is
     # {3:0, 2:2, 1:1}
-    assert (sptensorInstance.ttm(sparse.coo_matrix(np.ones((4, 4))), mode=1).isequal(ttb.tensor.from_data(result)))
+    assert (sptensorInstance.ttm(sparse.coo_matrix(np.ones((4, 4))), dims=1).isequal(ttb.tensor.from_data(result)))

     result = 2*np.ones((4, 4, 4))
     result[:, 1, 1] = 2.5
     result[:, 1, 3] = 3.5
     result[:, 2, 2] = 4.5
-    assert (sptensorInstance.ttm(sparse.coo_matrix(np.ones((4, 4))), mode=0).isequal(ttb.tensor.from_data(result)))
+    assert (sptensorInstance.ttm(sparse.coo_matrix(np.ones((4, 4))), dims=0).isequal(ttb.tensor.from_data(result)))

     result = np.zeros((4, 4, 4))
     result[0, :, :] = 4.0
     result[3, :, :] = 4.0
     result[1, 1, :] = 2
     result[2, 2, :] = 2.5
-    assert (sptensorInstance.ttm(sparse.coo_matrix(np.ones((4, 4))), mode=2).isequal(ttb.tensor.from_data(result)))
+    assert (sptensorInstance.ttm(sparse.coo_matrix(np.ones((4, 4))), dims=2).isequal(ttb.tensor.from_data(result)))
+
+    # Confirm reshape for non-square matrix
+    assert sptensorInstance.ttm(sparse.coo_matrix(np.ones((1, 4))), dims=2).shape == (4, 4, 1)
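+    # (The 1 x 4 operand contracts mode 2 from size 4 down to 1, so the
+    # 4 x 4 x 4 tensor becomes 4 x 4 x 1.)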
diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index ab81b1bb..46d7cf5d 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -280,6 +280,9 @@ def test_tensor__getitem__(sample_tensor_2way):
     assert tensorInstance[0, 0] == params['data'][0, 0]
     # Case 1 Subtensor
     assert (tensorInstance[:, :] == tensorInstance).data.all()
+    three_way_data = np.random.random((2, 3, 4))
+    two_slices = (slice(None, None, None), 0, slice(None, None, None))
+    assert (ttb.tensor.from_data(three_way_data)[two_slices].double() == three_way_data[two_slices]).all()
     # Case 1 Subtensor
     assert (tensorInstance[np.array([0, 1]), :].data == tensorInstance.data[[0, 1], :]).all()
     # Case 1 Subtensor
@@ -1054,7 +1057,7 @@ def test_tensor_ttm(sample_tensor_2way, sample_tensor_3way, sample_tensor_4way):
     # 3-way, dims must be in range [0,self.ndims]
     with pytest.raises(AssertionError) as excinfo:
         tensorInstance3.ttm(M2, tensorInstance3.ndims + 1)
-    assert "dims must contain values in [0,self.dims]" in str(excinfo)
+    assert "dims must contain values in [0,self.ndims)" in str(excinfo)

 @pytest.mark.indevelopment
 def test_tensor_ttt(sample_tensor_2way, sample_tensor_3way, sample_tensor_4way):
diff --git a/tests/test_ttensor.py b/tests/test_ttensor.py
new file mode 100644
index 00000000..6cb1d109
--- /dev/null
+++ b/tests/test_ttensor.py
@@ -0,0 +1,345 @@
+# Copyright 2022 National Technology & Engineering Solutions of Sandia,
+# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the
+# U.S. Government retains certain rights in this software.
+
+import numpy as np
+import pyttb as ttb
+import pytest
+import scipy.sparse as sparse
+
+@pytest.fixture()
+def sample_ttensor():
+    """Simple TTENSOR to verify by hand"""
+    core = ttb.tensor.from_data(np.ones((2, 2, 2)))
+    factors = [np.ones((1, 2))] * len(core.shape)
+    ttensorInstance = ttb.ttensor.from_data(core, factors)
+    return ttensorInstance
+
+@pytest.fixture()
+def random_ttensor():
+    """Arbitrary TTENSOR to verify consistency between alternative operations"""
+    core = ttb.tensor.from_data(np.random.random((2, 3, 4)))
+    factors = [
+        np.random.random((5, 2)),
+        np.random.random((2, 3)),
+        np.random.random((4, 4)),
+    ]
+    ttensorInstance = ttb.ttensor.from_data(core, factors)
+    return ttensorInstance
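+
+# Note: random_ttensor reconstructs to a dense 5 x 2 x 4 tensor; the factor
+# row counts are deliberately different so mode mix-ups surface as shape
+# errors in the tests below.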
+
+@pytest.mark.indevelopment
+def test_ttensor_initialization_empty():
+    empty_tensor = ttb.tensor()
+
+    # No args
+    ttensorInstance = ttb.ttensor()
+    assert ttensorInstance.core == empty_tensor
+    assert ttensorInstance.u == []
+
+@pytest.mark.indevelopment
+def test_ttensor_initialization_from_data(sample_ttensor):
+    ttensorInstance = sample_ttensor
+    assert isinstance(ttensorInstance.core, ttb.tensor)
+    assert all([isinstance(a_factor, np.ndarray) for a_factor in ttensorInstance.u])
+
+    # Negative Tests
+    non_array_factor = ttensorInstance.u + [1]
+    with pytest.raises(ValueError):
+        ttb.ttensor.from_data(ttensorInstance.core, non_array_factor[1:])
+
+    non_matrix_factor = ttensorInstance.u + [np.array([1])]
+    with pytest.raises(ValueError):
+        ttb.ttensor.from_data(ttensorInstance.core, non_matrix_factor[1:])
+
+    too_few_factors = ttensorInstance.u.copy()
+    too_few_factors.pop()
+    with pytest.raises(ValueError):
+        ttb.ttensor.from_data(ttensorInstance.core, too_few_factors)
+
+    wrong_shape_factor = ttensorInstance.u.copy()
+    row, col = wrong_shape_factor[0].shape
+    wrong_shape_factor[0] = np.random.random((row+1, col+1))
+    with pytest.raises(ValueError):
+        ttb.ttensor.from_data(ttensorInstance.core, wrong_shape_factor)
+
+    # Enforce error until sptensor core/other cores supported
+    with pytest.raises(ValueError):
+        ttb.ttensor.from_data(ttb.sptensor.from_tensor_type(ttensorInstance.core), ttensorInstance.u)
+
+@pytest.mark.indevelopment
+def test_ttensor_initialization_from_tensor_type(sample_ttensor):
+
+    # Copy constructor
+    ttensorInstance = sample_ttensor
+    ttensorCopy = ttb.ttensor.from_tensor_type(ttensorInstance)
+    assert ttensorCopy.core == ttensorInstance.core
+    assert ttensorCopy.u == ttensorInstance.u
+    assert ttensorCopy.shape == ttensorInstance.shape
+
+@pytest.mark.indevelopment
+def test_ttensor_full(sample_ttensor):
+    ttensorInstance = sample_ttensor
+    tensor = ttensorInstance.full()
+    # This sanity check only works for all 1's
+    assert tensor.double() == np.prod(ttensorInstance.core.shape)
+
+    # Negative tests
+    sparse_core = ttb.sptensor()
+    sparse_core.shape = ttensorInstance.core.shape
+    sparse_u = [sparse.coo_matrix(np.zeros(factor.shape)) for factor in ttensorInstance.u]
+    # We could probably make these properties to avoid this edge case, but we
+    # expect to eventually cover these alternate cores anyway
+    ttensorInstance.core = sparse_core
+    ttensorInstance.u = sparse_u
+    with pytest.raises(ValueError):
+        ttensorInstance.full()
+
+@pytest.mark.indevelopment
+def test_ttensor_double(sample_ttensor):
+    ttensorInstance = sample_ttensor
+    # This sanity check only works for all 1's
+    assert ttensorInstance.double() == np.prod(ttensorInstance.core.shape)
+
+@pytest.mark.indevelopment
+def test_ttensor_ndims(sample_ttensor):
+    ttensorInstance = sample_ttensor
+
+    assert ttensorInstance.ndims == 3
+
+@pytest.mark.indevelopment
+def test_ttensor__pos__(sample_ttensor):
+    ttensorInstance = sample_ttensor
+    ttensorInstance2 = +ttensorInstance
+
+    assert ttensorInstance.isequal(ttensorInstance2)
+
+@pytest.mark.indevelopment
+def test_ttensor__neg__(sample_ttensor):
+    ttensorInstance = sample_ttensor
+    ttensorInstance2 = -ttensorInstance
+    ttensorInstance3 = -ttensorInstance2
+
+    assert not ttensorInstance.isequal(ttensorInstance2)
+    assert ttensorInstance.isequal(ttensorInstance3)
+
+@pytest.mark.indevelopment
+def test_ttensor_innerproduct(sample_ttensor, random_ttensor):
+    ttensorInstance = sample_ttensor
+
+    # TODO these are an overly simplistic edge case for ttensors that are a single float
+
+    # ttensor innerprod ttensor
+    assert ttensorInstance.innerprod(ttensorInstance) == ttensorInstance.double()**2
+    core_dim = ttensorInstance.core.shape[0] + 1
+    ndim = ttensorInstance.ndims
+    large_core_ttensor = ttb.ttensor.from_data(
+        ttb.tensor.from_data(np.ones((core_dim,)*ndim)),
+        [np.ones((1, core_dim))] * ndim
+    )
+    assert large_core_ttensor.innerprod(ttensorInstance) == ttensorInstance.full().innerprod(large_core_ttensor.full())
+
+    # ttensor innerprod tensor
+    assert ttensorInstance.innerprod(ttensorInstance.full()) == ttensorInstance.double() ** 2
+
+    # ttensor innerprod ktensor
+    ktensorInstance = ttb.ktensor.from_data(np.array([8.]), [np.array([[1.]])]*3)
+    assert ttensorInstance.innerprod(ktensorInstance) == ttensorInstance.double() ** 2
+
+    # ttensor innerprod tensor (shape larger than core)
+    random_ttensor.innerprod(random_ttensor.full())
+
+    # Negative Tests
+    ttensor_extra_factors = ttb.ttensor.from_tensor_type(ttensorInstance)
+    ttensor_extra_factors.u.extend(ttensorInstance.u)
+    with pytest.raises(ValueError):
+        ttensorInstance.innerprod(ttensor_extra_factors)
+
+    tensor_extra_dim = ttb.tensor.from_data(np.ones(ttensorInstance.shape + (1,)))
+    with pytest.raises(ValueError):
+        ttensorInstance.innerprod(tensor_extra_dim)
+
+    invalid_option = []
+    with pytest.raises(ValueError):
+        ttensorInstance.innerprod(invalid_option)
+
+@pytest.mark.indevelopment
+def test_ttensor__mul__(sample_ttensor):
+    ttensorInstance = sample_ttensor
+    mul_factor = 2
+
+    # This sanity check only works for all 1's
+    assert (ttensorInstance * mul_factor).double() == np.prod(ttensorInstance.core.shape) * mul_factor
+    assert (ttensorInstance * float(2)).double() == np.prod(ttensorInstance.core.shape) * float(mul_factor)
+
+    # Negative tests
+    with pytest.raises(ValueError):
+        _ = ttensorInstance * 'some_string'
+
+@pytest.mark.indevelopment
+def test_ttensor__rmul__(sample_ttensor):
+    ttensorInstance = sample_ttensor
+    mul_factor = 2
+
+    # This sanity check only works for all 1's
+    assert (mul_factor * ttensorInstance).double() == np.prod(ttensorInstance.core.shape) * mul_factor
+    assert (float(2) * ttensorInstance).double() == np.prod(ttensorInstance.core.shape) * float(mul_factor)
+
+    # Negative tests
+    with pytest.raises(ValueError):
+        _ = 'some_string' * ttensorInstance
+
+@pytest.mark.indevelopment
+def test_ttensor_ttv(sample_ttensor):
+    ttensorInstance = sample_ttensor
+    mul_factor = 1
+    trivial_vectors = [np.array([mul_factor])]*len(ttensorInstance.shape)
+    final_value = sample_ttensor.ttv(trivial_vectors)
+    assert final_value == np.prod(ttensorInstance.core.shape)
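+    # (Unit vectors in every mode make the contraction a plain sum over the
+    # all-ones core, hence np.prod(core.shape).)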
+
+    assert np.allclose(
+        ttensorInstance.ttv(trivial_vectors[0], 0).double(),
+        ttensorInstance.full().ttv(trivial_vectors[0], 0).double()
+    )
+
+    # Negative tests
+    wrong_shape_vector = trivial_vectors.copy()
+    wrong_shape_vector[0] = np.array([mul_factor, mul_factor])
+    with pytest.raises(ValueError):
+        sample_ttensor.ttv(wrong_shape_vector)
+
+@pytest.mark.indevelopment
+def test_ttensor_mttkrp(random_ttensor):
+    ttensorInstance = random_ttensor
+    column_length = 6
+    vectors = [
+        np.random.random((u.shape[0], column_length)) for u in ttensorInstance.u
+    ]
+    final_value = ttensorInstance.mttkrp(vectors, 2)
+    full_value = ttensorInstance.full().mttkrp(vectors, 2)
+    assert np.allclose(final_value, full_value), (
+        f"TTensor value is: \n{final_value}\n\n"
+        f"Full value is: \n{full_value}"
+    )
+
+@pytest.mark.indevelopment
+def test_ttensor_norm(sample_ttensor, random_ttensor):
+    ttensorInstance = random_ttensor
+    assert np.isclose(ttensorInstance.norm(), ttensorInstance.full().norm())
+
+    # Core larger than full tensor
+    ttensorInstance = sample_ttensor
+    assert np.isclose(ttensorInstance.norm(), ttensorInstance.full().norm())
+
+@pytest.mark.indevelopment
+def test_ttensor_permute(random_ttensor):
+    ttensorInstance = random_ttensor
+    original_order = np.arange(0, len(ttensorInstance.core.shape))
+    permuted_tensor = ttensorInstance.permute(original_order)
+    assert ttensorInstance.isequal(permuted_tensor)
+
+    # Negative Tests
+    with pytest.raises(ValueError):
+        bad_permutation_order = np.arange(0, len(ttensorInstance.core.shape) + 1)
+        ttensorInstance.permute(bad_permutation_order)
+
+@pytest.mark.indevelopment
+def test_ttensor_ttm(random_ttensor):
+    ttensorInstance = random_ttensor
+    row_length = 9
+    matrices = [
+        np.random.random((row_length, u.shape[0])) for u in ttensorInstance.u
+    ]
+    final_value = ttensorInstance.ttm(matrices, np.arange(len(matrices)))
+    reverse_value = ttensorInstance.ttm(list(reversed(matrices)), np.arange(len(matrices)-1, -1, -1))
+    assert final_value.isequal(reverse_value), (
+        f"TTensor value is: \n{final_value}\n\n"
+        f"Full value is: \n{reverse_value}"
+    )
+    final_value = ttensorInstance.ttm(matrices)  # No dims
+    assert final_value.isequal(reverse_value)
+    final_value = ttensorInstance.ttm(matrices, list(range(len(matrices))))  # Dims as list
+    assert final_value.isequal(reverse_value)
+
+    single_tensor_result = ttensorInstance.ttm(matrices[0], 0)
+    single_tensor_full_result = ttensorInstance.full().ttm(matrices[0], 0)
+    assert np.allclose(single_tensor_result.double(), single_tensor_full_result.double()), (
+        f"TTensor value is: \n{single_tensor_result.full()}\n\n"
+        f"Full value is: \n{single_tensor_full_result}"
+    )
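+
+    # ttm with transpose=True applies matrix.T, so passing A.T with
+    # transpose=True must reproduce the plain ttm(A) result from above.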
+    transposed_matrices = [matrix.transpose() for matrix in matrices]
+    transpose_value = ttensorInstance.ttm(transposed_matrices, np.arange(len(matrices)), transpose=True)
+    assert final_value.isequal(transpose_value)
+
+    # Negative Tests
+    big_wrong_size = 123
+    matrices[0] = np.random.random((big_wrong_size, big_wrong_size))
+    with pytest.raises(ValueError):
+        _ = ttensorInstance.ttm(matrices, np.arange(len(matrices)))
+
+@pytest.mark.indevelopment
+def test_ttensor_reconstruct(random_ttensor):
+    ttensorInstance = random_ttensor
+    # TODO: This slice drops the singleton dimension, should it? If so should ttensor squeeze during reconstruct?
+    full_slice = ttensorInstance.full()[:, 1, :]
+    ttensor_slice = ttensorInstance.reconstruct(1, 1)
+    assert np.allclose(full_slice.double(), ttensor_slice.squeeze().double())
+    assert ttensorInstance.reconstruct().isequal(ttensorInstance.full())
+    sample_all_modes = [np.array([0])] * len(ttensorInstance.shape)
+    sample_all_modes[-1] = 0  # Make raw scalar
+    reconstruct_scalar = ttensorInstance.reconstruct(sample_all_modes).full().double()
+    full_scalar = ttensorInstance.full()[tuple(sample_all_modes)]
+    assert np.isclose(reconstruct_scalar, full_scalar)
+
+    scale = np.random.random(ttensorInstance.u[1].shape).transpose()
+    _ = ttensorInstance.reconstruct(scale, 1)
+    # FIXME from the MATLAB docs wasn't totally clear how to validate this
+
+    # Negative Tests
+    with pytest.raises(ValueError):
+        _ = ttensorInstance.reconstruct(1, [0, 1])
+
+@pytest.mark.indevelopment
+def test_ttensor_nvecs(random_ttensor):
+    ttensorInstance = random_ttensor
+    n = 0
+    r = 2
+    ttensor_eigvals = ttensorInstance.nvecs(n, r)
+    full_eigvals = ttensorInstance.full().nvecs(n, r)
+    assert np.allclose(ttensor_eigvals, full_eigvals)
+
+    # Test for eig vals larger than shape-1
+    n = 1
+    r = 2
+    full_eigvals = ttensorInstance.full().nvecs(n, r)
+    with pytest.warns(Warning) as record:
+        ttensor_eigvals = ttensorInstance.nvecs(n, r)
+    assert 'Greater than or equal to tensor.shape[n] - 1 eigenvectors requires cast to dense to solve' \
+        in str(record[0].message)
+    assert np.allclose(ttensor_eigvals, full_eigvals)
+
+    # Negative Tests
+    sparse_core = ttb.sptensor()
+    sparse_core.shape = ttensorInstance.core.shape
+    ttensorInstance.core = sparse_core
+
+    # Sparse core
+    with pytest.raises(NotImplementedError):
+        ttensorInstance.nvecs(0, 1)
+
+    # Sparse factors
+    sparse_u = [sparse.coo_matrix(np.zeros(factor.shape)) for factor in ttensorInstance.u]
+    ttensorInstance.u = sparse_u
+    with pytest.raises(NotImplementedError):
+        ttensorInstance.nvecs(0, 1)
+
+@pytest.mark.indevelopment
+def test_ttensor_isequal(sample_ttensor):
+    ttensorInstance = sample_ttensor
+    # Negative Tests
+    assert not ttensorInstance.isequal(ttensorInstance.full())
+    ttensor_extra_factors = ttb.ttensor.from_tensor_type(ttensorInstance)
+    ttensor_extra_factors.u.extend(ttensorInstance.u)
+    assert not ttensorInstance.isequal(ttensor_extra_factors)