DaMAT.utils

This file contains the utility functions for running the TT-ICE package

  1""" This file contains the utility functions for running TT-ICE pack"""
  2
  3import numpy as np
  4
  5
  6def primes(n):
  7    """
  8    Finds prime factors for a given number.
  9
 10    Used for calculating the reshapings.
 11
 12    Parameters
 13    ----------
 14    n:obj:`int`
 15        Number to be factored out
 16
 17    Returns
 18    -------
 19    primFac:obj:`list`
 20        List of prime factors
 21    """
 22    primFac = []
 23    d = 2
 24    while d * d <= n:
 25        while (n % d) == 0:
 26            primFac.append(d)  # record repeated prime factors
 27            n //= d
 28        d += 1
 29    if n > 1:
 30        primFac.append(n)
 31    return primFac
 32
 33
 34def coreContraction(cores):
 35    """
 36    Converts TT-cores to full size tensors.
 37
 38    Parameters
 39    ----------
 40    cores:obj:`list` or iterable
 41        A list of TT-cores.
 42
 43    Returns
 44    -------
 45    coreProd:obj:`np.ndarray`
 46        Full tensor represented by the TT-cores.
 47    """
 48    # TODO: input checking
 49
 50    for coreIdx in range(len(cores) - 1):
 51        if coreIdx == 0:
 52            coreProd = np.tensordot(cores[coreIdx], cores[coreIdx + 1], axes=(-1, 0))
 53        else:
 54            coreProd = np.tensordot(coreProd, cores[coreIdx + 1], axes=(-1, 0))
 55    # coreProd = coreProd.reshape(coreProd.shape[1:-1])
 56    return coreProd.squeeze()
 57
 58
 59def deltaSVD(data, dataNorm, dimensions, eps=0.1):
 60    """
 61    Performs delta-truncated SVD similar to that of the `TTSVD`_ algorithm.
 62
 63    Given an unfolding matrix of a tensor, its norm and the number of dimensions
 64    of the original tensor, this function computes the truncated SVD of `data`.
 65    The truncation error is determined using the dimension-dependent _delta_ formula
 66    from the `TTSVD`_ paper.
 67
 68    Parameters
 69    ----------
 70    data:obj:`numpy.array`
 71        Matrix for which the truncated SVD will be performed.
 72    dataNorm:obj:`float`
 73        Norm of the matrix. This parameter is used to determine the truncation bound.
 74    dimensions:obj:`int`
 75        Number of dimensions of the original tensor. This parameter is used to determine
 76        the truncation bound.
 77    eps:obj:`float`, optional
 78        Relative error upper bound for TT-decomposition.
 79
 80    Returns
 81    -------
 82    u:obj:`numpy.ndarray`
 83        Column-wise orthonormal matrix of left-singular vectors. _Truncated_
 84    s:obj:`numpy.array`
 85        Array of singular values. _Truncated_
 86    v:obj:`numpy.ndarray`
 87        Row-wise orthonormal matrix of right-singular vectors. _Truncated_
 88
 89    .. _TTSVD:
 90        https://epubs.siam.org/doi/epdf/10.1137/090752286
 91    """
 92
 93    # TODO: input checking
 94
 95    delta = (eps / ((dimensions - 1) ** (0.5))) * dataNorm
 96    try:
 97        u, s, v = np.linalg.svd(data, False, True)
 98    except np.linalg.LinAlgError:
 99        print("Numpy svd did not converge, using qr+svd")
100        q, r = np.linalg.qr(data)
101        u, s, v = np.linalg.svd(r)
102        u = q @ u
103    slist = list(s * s)
104    slist.reverse()
105    truncpost = [
106        idx for idx, element in enumerate(np.cumsum(slist)) if element <= delta**2
107    ]
108    truncationRank = len(s) - len(truncpost)
109    return u[:, :truncationRank], s[:truncationRank], v[:truncationRank, :]
110
111
112def ttsvd(data, dataNorm, eps=0.1, dtype=np.float32):
113    """
114    Computes Tensor-Train decomposition/approximation of tensors using the `TTSVD`_ algorithm.
115
116    Parameters
117    ----------
118    data:obj:`numpy.array`
119        Tensor to be decomposed/approximated.
120    dataNorm:obj:`float`
121        Norm of the tensor. This parameter is used to determine the truncation bound.
122    eps:obj:`float`, optional
123        Relative error upper bound for TT-decomposition. Set to 0.1 by default.
124    dtype:obj:`type`, optional
125        Data type to be used during computations. Set to `np.float32` by default.
126
127    Returns
128    -------
129    ranks:obj:`list`
130        List of TT-ranks.
131    cores:obj:`list` of :obj:`numpy.ndarray`
132        Cores of the TT-approximation.
133
134    .. _TTSVD:
135        https://epubs.siam.org/doi/epdf/10.1137/090752286
136    """
137    inputShape = data.shape
138    dimensions = len(data.shape)
139    delta = (eps / ((dimensions - 1) ** (0.5))) * dataNorm
140    ranks = [1]
141    cores = []
142    for k in range(dimensions - 1):
143        nk = inputShape[k]
144        data = data.reshape(
145            ranks[k] * nk, int(np.prod(data.shape) / (ranks[k] * nk)), order="F"
146        ).astype(dtype)
147        u, s, v = np.linalg.svd(data, False, True)
148        slist = list(s * s)
149        slist.reverse()
150        truncpost = [
151            idx for idx, element in enumerate(np.cumsum(slist)) if element <= delta**2
152        ]
153        ranks.append(len(s) - len(truncpost))
154
155        u = u[:, : ranks[-1]]
156
157        cores.append(u.reshape(ranks[k], nk, ranks[k + 1], order="F"))
158        data = np.zeros_like(v[: ranks[-1], :])
159        for idx, sigma in enumerate(s[: ranks[-1]]):
160            data[idx, :] = sigma * v[idx, :]
161
162    ranks.append(1)
163    cores.append(data.reshape(ranks[-2], inputShape[-1], ranks[-1], order="F"))
164    return ranks, cores
165
166def mode_n_unfolding(tensor, mode):
167    # Computes the mode-n unfolding/matricization of a tensor in the sense of Kolda & Bader
168    # Assumes the mode is given in 0-indexed format
169    nDims = len(tensor.shape)
170    dims = [dim for dim in range(nDims)]
171    modeIdx = dims.pop(mode)
172    dims = [modeIdx] + dims
173    tensor = tensor.transpose(dims)
174    return tensor.reshape(tensor.shape[0], -1, order="F")
175
176def solve(A, B, method="pinv"):
177    if method == "pinv":
178        try:
179            return np.linalg.pinv(A) @ B
180        except np.linalg.LinAlgError:
181            print("Numpy svd did not converge, using qr+svd")
182            q, r = np.linalg.qr(A)
183            return np.linalg.pinv(r) @ (q.T @ B)  # pinv(A) = pinv(R) @ Q.T for A = QR
184    elif method == "lstsq":
185        try:
186            return np.linalg.lstsq(A, B, rcond=None)[0]
187        except np.linalg.LinAlgError:
188            print("Numpy svd did not converge, using qr+svd")
189            q, r = np.linalg.qr(A)
190            return np.linalg.lstsq(r, q.T @ B, rcond=None)[0]  # solve R @ x = Q.T @ B
def primes(n):
 7def primes(n):
 8    """
 9    Finds prime factors for a given number.
10
11    Used for calculating the reshapings.
12
13    Parameters
14    ----------
15    n:obj:`int`
16        Number to be factored out
17
18    Returns
19    -------
20    primFac:obj:`list`
21        List of prime factors
22    """
23    primFac = []
24    d = 2
25    while d * d <= n:
26        while (n % d) == 0:
27            primFac.append(d)  # record repeated prime factors
28            n //= d
29        d += 1
30    if n > 1:
31        primFac.append(n)
32    return primFac

Finds prime factors for a given number.

Used for calculating the reshapings.

Parameters
  • n (int): Number to be factored out
Returns
  • primFac (list): List of prime factors
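
A quick illustrative sketch (assuming the DaMAT package is installed and importable): the returned list multiplies back to the input, with repeated factors included.

    from DaMAT.utils import primes

    print(primes(360))  # [2, 2, 2, 3, 3, 5]
    print(primes(97))   # [97]; a prime number is returned as-is
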
def coreContraction(cores):
35def coreContraction(cores):
36    """
37    Converts TT-cores to full size tensors.
38
39    Parameters
40    ----------
41    cores:obj:`list` or iterable
42        A list of TT-cores.
43
44    Returns
45    -------
46    coreProd:obj:`np.ndarray`
47        Full tensor represented by the TT-cores.
48    """
49    # TODO: input checking
50
51    for coreIdx in range(len(cores) - 1):
52        if coreIdx == 0:
53            coreProd = np.tensordot(cores[coreIdx], cores[coreIdx + 1], axes=(-1, 0))
54        else:
55            coreProd = np.tensordot(coreProd, cores[coreIdx + 1], axes=(-1, 0))
56    # coreProd = coreProd.reshape(coreProd.shape[1:-1])
57    return coreProd.squeeze()

Converts TT-cores to full size tensors.

Parameters
  • cores (list or iterable): A list of TT-cores.
Returns
  • coreProd (np.ndarray): Full tensor represented by the TT-cores.
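
A minimal sketch of contracting hand-built cores; the shapes below are hypothetical TT-cores with ranks [1, 2, 3, 1] for a tensor of shape (4, 5, 6):

    import numpy as np
    from DaMAT.utils import coreContraction

    cores = [
        np.random.rand(1, 4, 2),  # first core, rank 1 -> 2
        np.random.rand(2, 5, 3),  # middle core, rank 2 -> 3
        np.random.rand(3, 6, 1),  # last core, rank 3 -> 1
    ]
    full = coreContraction(cores)
    print(full.shape)  # (4, 5, 6) after the boundary ranks of 1 are squeezed out
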
def deltaSVD(data, dataNorm, dimensions, eps=0.1):
 60def deltaSVD(data, dataNorm, dimensions, eps=0.1):
 61    """
 62    Performs delta-truncated SVD similar to that of the `TTSVD`_ algorithm.
 63
 64    Given an unfolding matrix of a tensor, its norm and the number of dimensions
 65    of the original tensor, this function computes the truncated SVD of `data`.
 66    The truncation error is determined using the dimension-dependent _delta_ formula
 67    from the `TTSVD`_ paper.
 68
 69    Parameters
 70    ----------
 71    data:obj:`numpy.array`
 72        Matrix for which the truncated SVD will be performed.
 73    dataNorm:obj:`float`
 74        Norm of the matrix. This parameter is used to determine the truncation bound.
 75    dimensions:obj:`int`
 76        Number of dimensions of the original tensor. This parameter is used to determine
 77        the truncation bound.
 78    eps:obj:`float`, optional
 79        Relative error upper bound for TT-decomposition.
 80
 81    Returns
 82    -------
 83    u:obj:`numpy.ndarray`
 84        Column-wise orthonormal matrix of left-singular vectors. _Truncated_
 85    s:obj:`numpy.array`
 86        Array of singular values. _Truncated_
 87    v:obj:`numpy.ndarray`
 88        Row-wise orthonormal matrix of right-singular vectors. _Truncated_
 89
 90    .. _TTSVD:
 91        https://epubs.siam.org/doi/epdf/10.1137/090752286
 92    """
 93
 94    # TODO: input checking
 95
 96    delta = (eps / ((dimensions - 1) ** (0.5))) * dataNorm
 97    try:
 98        u, s, v = np.linalg.svd(data, False, True)
 99    except np.linalg.LinAlgError:
100        print("Numpy svd did not converge, using qr+svd")
101        q, r = np.linalg.qr(data)
102        u, s, v = np.linalg.svd(r)
103        u = q @ u
104    slist = list(s * s)
105    slist.reverse()
106    truncpost = [
107        idx for idx, element in enumerate(np.cumsum(slist)) if element <= delta**2
108    ]
109    truncationRank = len(s) - len(truncpost)
110    return u[:, :truncationRank], s[:truncationRank], v[:truncationRank, :]

Performs delta-truncated SVD similar to that of the TTSVD algorithm.

Given an unfolding matrix of a tensor, its norm and the number of dimensions of the original tensor, this function computes the truncated SVD of data. The truncation error is determined using the dimension-dependent _delta_ formula from the TTSVD paper.

Parameters
  • data (numpy.array): Matrix for which the truncated SVD will be performed.
  • dataNorm (float): Norm of the matrix. This parameter is used to determine the truncation bound.
  • dimensions (int): Number of dimensions of the original tensor. This parameter is used to determine the truncation bound.
  • eps (float, optional): Relative error upper bound for TT-decomposition.
Returns
  • u (numpy.ndarray): Column-wise orthonormal matrix of left-singular vectors. _Truncated_
  • s (numpy.array): Array of singular values. _Truncated_
  • v (numpy.ndarray): Row-wise orthonormal matrix of right-singular vectors. _Truncated_
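
An illustrative sketch (with arbitrary sizes) of calling deltaSVD on a random unfolding matrix; by construction the discarded singular values keep the error below delta = eps / sqrt(dimensions - 1) * dataNorm:

    import numpy as np
    from DaMAT.utils import deltaSVD

    A = np.random.rand(60, 40)  # stand-in for an unfolding matrix
    u, s, v = deltaSVD(A, np.linalg.norm(A), dimensions=3, eps=0.1)
    relErr = np.linalg.norm(A - u @ np.diag(s) @ v) / np.linalg.norm(A)
    print(u.shape, s.shape, v.shape)
    print(relErr <= 0.1 / np.sqrt(2))  # True: error stays within the delta bound
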
def ttsvd(data, dataNorm, eps=0.1, dtype=np.float32):
113def ttsvd(data, dataNorm, eps=0.1, dtype=np.float32):
114    """
115    Computes Tensor-Train decomposition/approximation of tensors using the `TTSVD`_ algorithm.
116
117    Parameters
118    ----------
119    data:obj:`numpy.array`
120        Tensor to be decomposed/approximated.
121    dataNorm:obj:`float`
122        Norm of the tensor. This parameter is used to determine the truncation bound.
123    eps:obj:`float`, optional
124        Relative error upper bound for TT-decomposition. Set to 0.1 by default.
125    dtype:obj:`type`, optional
126        Data type to be used during computations. Set to `np.float32` by default.
127
128    Returns
129    -------
130    ranks:obj:`list`
131        List of TT-ranks.
132    cores:obj:`list` of :obj:`numpy.ndarray`
133        Cores of the TT-approximation.
134
135    .. _TTSVD:
136        https://epubs.siam.org/doi/epdf/10.1137/090752286
137    """
138    inputShape = data.shape
139    dimensions = len(data.shape)
140    delta = (eps / ((dimensions - 1) ** (0.5))) * dataNorm
141    ranks = [1]
142    cores = []
143    for k in range(dimensions - 1):
144        nk = inputShape[k]
145        data = data.reshape(
146            ranks[k] * nk, int(np.prod(data.shape) / (ranks[k] * nk)), order="F"
147        ).astype(dtype)
148        u, s, v = np.linalg.svd(data, False, True)
149        slist = list(s * s)
150        slist.reverse()
151        truncpost = [
152            idx for idx, element in enumerate(np.cumsum(slist)) if element <= delta**2
153        ]
154        ranks.append(len(s) - len(truncpost))
155
156        u = u[:, : ranks[-1]]
157
158        cores.append(u.reshape(ranks[k], nk, ranks[k + 1], order="F"))
159        data = np.zeros_like(v[: ranks[-1], :])
160        for idx, sigma in enumerate(s[: ranks[-1]]):
161            data[idx, :] = sigma * v[idx, :]
162
163    ranks.append(1)
164    cores.append(data.reshape(ranks[-2], inputShape[-1], ranks[-1], order="F"))
165    return ranks, cores

Computes Tensor-Train decomposition/approximation of tensors using the TTSVD algorithm.

Parameters
  • data (numpy.array): Tensor to be decomposed/approximated.
  • dataNorm (float): Norm of the tensor. This parameter is used to determine the truncation bound.
  • eps (float, optional): Relative error upper bound for TT-decomposition. Set to 0.1 by default.
  • dtype (type, optional): Data type to be used during computations. Set to np.float32 by default.
Returns
  • ranks (list): List of TT-ranks.
  • cores (list of numpy.ndarray): Cores of the TT-approximation.
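
A rough end-to-end sketch (assuming DaMAT.utils is importable) that decomposes a small random tensor and reconstructs it with coreContraction; the relative reconstruction error is bounded by eps:

    import numpy as np
    from DaMAT.utils import ttsvd, coreContraction

    X = np.random.rand(4, 5, 6, 7)
    ranks, cores = ttsvd(X, np.linalg.norm(X), eps=0.1, dtype=np.float64)
    Xhat = coreContraction(cores)
    relErr = np.linalg.norm(X - Xhat) / np.linalg.norm(X)
    print(ranks, relErr)  # relErr <= 0.1 up to floating-point effects
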
def mode_n_unfolding(tensor, mode):
167def mode_n_unfolding(tensor, mode):
168    # Computes the mode-n unfolding/matricization of a tensor in the sense of Kolda & Bader
169    # Assumes the mode is given in 0-indexed format
170    nDims = len(tensor.shape)
171    dims = [dim for dim in range(nDims)]
172    modeIdx = dims.pop(mode)
173    dims = [modeIdx] + dims
174    tensor = tensor.transpose(dims)
175    return tensor.reshape(tensor.shape[0], -1, order="F")
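
A small sketch of the unfolding on a 2 x 3 x 4 tensor; mode 1 (the second axis, 0-indexed) moves to the rows and the remaining modes are flattened into the columns:

    import numpy as np
    from DaMAT.utils import mode_n_unfolding

    X = np.arange(24).reshape(2, 3, 4)
    X1 = mode_n_unfolding(X, 1)
    print(X1.shape)   # (3, 8)
    print(X1[:, 0])   # first mode-1 fiber, X[0, :, 0] -> [0 4 8]
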
def solve(A, B, method='pinv'):
177def solve(A, B, method="pinv"):
178    if method == "pinv":
179        try:
180            return np.linalg.pinv(A) @ B
181        except np.linalg.LinAlgError:
182            print("Numpy svd did not converge, using qr+svd")
183            q, r = np.linalg.qr(A)
184            return np.linalg.pinv(r) @ (q.T @ B)  # pinv(A) = pinv(R) @ Q.T for A = QR
185    elif method == "lstsq":
186        try:
187            return np.linalg.lstsq(A, B, rcond=None)[0]
188        except np.linalg.LinAlgError:
189            print("Numpy svd did not converge, using qr+svd")
190            q, r = np.linalg.qr(A)
191            return np.linalg.lstsq(r, q.T @ B, rcond=None)[0]  # solve R @ x = Q.T @ B
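
An illustrative sketch on an overdetermined system (assuming DaMAT.utils is importable); for a well-conditioned A both methods return the same least-squares solution:

    import numpy as np
    from DaMAT.utils import solve

    A = np.random.rand(50, 10)
    B = np.random.rand(50, 3)
    x_pinv = solve(A, B, method="pinv")
    x_lstsq = solve(A, B, method="lstsq")
    print(x_pinv.shape)                  # (10, 3)
    print(np.allclose(x_pinv, x_lstsq))  # True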