pub struct TensorTrain<T: TTScalar> { /* private fields */ }Expand description
Tensor Train (Matrix Product State) representation.
A tensor train decomposes a high-dimensional tensor T[i0, i1, ..., i_{L-1}]
into a chain of rank-3 core tensors:
T[i0, i1, ..., i_{L-1}] = A0[i0] * A1[i1] * ... * A_{L-1}[i_{L-1}], where each core Ak has shape (r_{k-1}, d_k, r_k) with:
- r_k = bond dimension (the link between site k and site k+1),
- d_k = physical (site) dimension at site k,
- r_{-1} = r_{L-1} = 1 (boundary condition).
§Construction
- TensorTrain::constant – all entries equal to a given value
- TensorTrain::zeros – all entries zero
- TensorTrain::new – from explicit rank-3 core tensors
§Related types
- CompressionOptions – configure compression
- TTCache – cached evaluation
- SiteTensorTrain – center-canonical form
- VidalTensorTrain – Vidal canonical form
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
// Create a constant tensor train: T[i,j,k] = 3.0 for all i,j,k
let tt = TensorTrain::<f64>::constant(&[2, 3, 4], 3.0);
assert_eq!(tt.len(), 3);
assert_eq!(tt.site_dims(), vec![2, 3, 4]);
assert_eq!(tt.link_dims(), vec![1, 1]); // bond dim = 1 for constant
// Evaluate at a specific index
let val = tt.evaluate(&[0, 1, 2]).unwrap();
assert!((val - 3.0).abs() < 1e-12);
// Sum over all indices: 3.0 * 2 * 3 * 4 = 72.0
let s = tt.sum();
assert!((s - 72.0).abs() < 1e-10);
Implementations§
Source§impl<T: TTScalar> TensorTrain<T>
impl<T: TTScalar> TensorTrain<T>
Sourcepub fn add(&self, other: &Self) -> Result<Self>
pub fn add(&self, other: &Self) -> Result<Self>
Add two tensor trains element-wise: result[i] = self[i] + other[i].
The result has bond dimension equal to the sum of the input bond
dimensions. Call compress afterward
to reduce the bond dimension.
§Errors
Returns an error if the tensor trains have different lengths or mismatched site dimensions.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
let a = TensorTrain::<f64>::constant(&[2, 3], 1.0);
let b = TensorTrain::<f64>::constant(&[2, 3], 2.0);
let c = a.add(&b).unwrap();
// Every entry = 1 + 2 = 3
assert!((c.evaluate(&[0, 0]).unwrap() - 3.0).abs() < 1e-12);
// Bond dim = 1 + 1 = 2
assert_eq!(c.rank(), 2);
Sourcepub fn sub(&self, other: &Self) -> Result<Self>
pub fn sub(&self, other: &Self) -> Result<Self>
Subtract element-wise: result[i] = self[i] - other[i].
Equivalent to self.add(&other.negate()).
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
let a = TensorTrain::<f64>::constant(&[2, 3], 5.0);
let b = TensorTrain::<f64>::constant(&[2, 3], 2.0);
let c = a.sub(&b).unwrap();
assert!((c.evaluate(&[0, 0]).unwrap() - 3.0).abs() < 1e-12);
Source§impl<T: TTScalar + Scalar + Default> TensorTrain<T>
impl<T: TTScalar + Scalar + Default> TensorTrain<T>
Sourcepub fn compress(&mut self, options: &CompressionOptions) -> Result<()>where
T: MatrixLuciScalar,
DenseFaerLuKernel: PivotKernel<T>,
pub fn compress(&mut self, options: &CompressionOptions) -> Result<()>where
T: MatrixLuciScalar,
DenseFaerLuKernel: PivotKernel<T>,
Compress the tensor train in place, reducing bond dimensions.
The algorithm performs two sweeps:
- Left-to-right: orthogonalize each bond without truncation.
- Right-to-left: truncate each bond according to
options.
After compression, the tensor train approximates the original within the specified tolerance while using smaller bond dimensions.
§Errors
Returns an error if the internal factorization fails.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain, CompressionOptions};
// Add two constant TTs to get bond dim 2, then compress back to 1
let a = TensorTrain::<f64>::constant(&[2, 3, 4], 1.0);
let b = TensorTrain::<f64>::constant(&[2, 3, 4], 2.0);
let mut sum = a.add(&b).unwrap(); // bond dim = 2
assert_eq!(sum.rank(), 2);
sum.compress(&CompressionOptions::default()).unwrap();
assert_eq!(sum.rank(), 1); // compressed back to optimal
// Values are preserved: 1.0 + 2.0 = 3.0
assert!((sum.evaluate(&[0, 0, 0]).unwrap() - 3.0).abs() < 1e-10);
Sourcepub fn compressed(&self, options: &CompressionOptions) -> Result<Self>where
T: MatrixLuciScalar,
DenseFaerLuKernel: PivotKernel<T>,
pub fn compressed(&self, options: &CompressionOptions) -> Result<Self>where
T: MatrixLuciScalar,
DenseFaerLuKernel: PivotKernel<T>,
Return a compressed copy of the tensor train (non-mutating).
Equivalent to cloning and calling compress.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain, CompressionOptions};
// A tensor train with redundant bond dimension can be compressed
let tt = TensorTrain::<f64>::constant(&[2, 3, 2], 1.0);
let opts = CompressionOptions::default();
let compressed = tt.compressed(&opts).unwrap();
// The compressed TT has the same number of sites
assert_eq!(compressed.len(), tt.len());
// Evaluations agree
let val_orig = tt.evaluate(&[0, 1, 0]).unwrap();
let val_comp = compressed.evaluate(&[0, 1, 0]).unwrap();
assert!((val_orig - val_comp).abs() < 1e-10);
Source§impl<T: TTScalar + Scalar + Default + EinsumScalar> TensorTrain<T>
impl<T: TTScalar + Scalar + Default + EinsumScalar> TensorTrain<T>
Sourcepub fn dot(&self, other: &Self) -> Result<T>
pub fn dot(&self, other: &Self) -> Result<T>
Inner product (dot product) of two tensor trains.
Computes sum_i self[i] * other[i] by contracting the site tensors
from left to right. Both tensor trains must have the same length and
matching site dimensions.
§Errors
Returns an error if lengths or site dimensions do not match.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
let a = TensorTrain::<f64>::constant(&[2, 3], 1.0);
let b = TensorTrain::<f64>::constant(&[2, 3], 2.0);
// dot = sum_ij a[i,j]*b[i,j]: each of the 2*3 = 6 entries contributes 1.0*2.0, so dot = 6 * 2.0 = 12.0
let d = a.dot(&b).unwrap();
assert!((d - 12.0).abs() < 1e-10);
Source§impl<T: TTScalar> TensorTrain<T>
impl<T: TTScalar> TensorTrain<T>
Sourcepub fn new(tensors: Vec<Tensor3<T>>) -> Result<Self>
pub fn new(tensors: Vec<Tensor3<T>>) -> Result<Self>
Create a new tensor train from a list of rank-3 core tensors.
Each tensor must have shape (left_bond, site_dim, right_bond) where
the right_bond of tensor i equals the left_bond of tensor i+1.
The first tensor must have left_bond = 1 and the last must have
right_bond = 1.
§Errors
Returns TensorTrainError::DimensionMismatch if adjacent bond
dimensions do not match, or TensorTrainError::InvalidOperation if
boundary dimensions are not 1.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain, Tensor3Ops, tensor3_zeros};
// Build a 2-site TT with bond dimension 1 and site dimensions [2, 3]
let mut t0 = tensor3_zeros::<f64>(1, 2, 1);
t0.set3(0, 0, 0, 1.0);
t0.set3(0, 1, 0, 2.0);
let mut t1 = tensor3_zeros::<f64>(1, 3, 1);
t1.set3(0, 0, 0, 10.0);
t1.set3(0, 1, 0, 20.0);
t1.set3(0, 2, 0, 30.0);
let tt = TensorTrain::new(vec![t0, t1]).unwrap();
assert_eq!(tt.len(), 2);
// T[0, 2] = 1.0 * 30.0 = 30.0
let val = tt.evaluate(&[0, 2]).unwrap();
assert!((val - 30.0).abs() < 1e-12);
Sourcepub fn zeros(site_dims: &[usize]) -> Self
pub fn zeros(site_dims: &[usize]) -> Self
Create a tensor train where every entry is zero.
The resulting TT has bond dimension 1 at every link.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
let tt = TensorTrain::<f64>::zeros(&[2, 3]);
assert!((tt.evaluate(&[1, 2]).unwrap()).abs() < 1e-14);
assert!((tt.sum()).abs() < 1e-14);
Sourcepub fn constant(site_dims: &[usize], value: T) -> Self
pub fn constant(site_dims: &[usize], value: T) -> Self
Create a tensor train where every entry equals value.
The resulting TT has bond dimension 1 at every link.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
let tt = TensorTrain::<f64>::constant(&[2, 3, 4], 5.0);
// Every entry is 5.0
assert!((tt.evaluate(&[0, 0, 0]).unwrap() - 5.0).abs() < 1e-12);
assert!((tt.evaluate(&[1, 2, 3]).unwrap() - 5.0).abs() < 1e-12);
// Sum = 5.0 * 2 * 3 * 4 = 120.0
assert!((tt.sum() - 120.0).abs() < 1e-10);
Sourcepub fn site_tensors_mut(&mut self) -> &mut [Tensor3<T>] ⓘ
pub fn site_tensors_mut(&mut self) -> &mut [Tensor3<T>] ⓘ
Get mutable access to the site tensors
Sourcepub fn scale(&mut self, factor: T)
pub fn scale(&mut self, factor: T)
Multiply every entry of the tensor train by factor in place.
Only the last core tensor is rescaled. Since the last core has shape (r, d, 1)
(its right bond is 1 by the boundary condition), this is an O(d * r) operation,
where d is the site dimension and r the left bond dimension of the last site.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
let mut tt = TensorTrain::<f64>::constant(&[2, 3], 1.0);
tt.scale(3.0);
assert!((tt.evaluate(&[0, 0]).unwrap() - 3.0).abs() < 1e-12);
assert!((tt.sum() - 18.0).abs() < 1e-10);
Sourcepub fn scaled(&self, factor: T) -> Self
pub fn scaled(&self, factor: T) -> Self
Return a new tensor train with every entry multiplied by factor.
This is the non-mutating version of scale.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
let tt = TensorTrain::<f64>::constant(&[2, 3], 1.0);
let tt2 = tt.scaled(4.0);
// Original is unchanged
assert!((tt.evaluate(&[0, 0]).unwrap() - 1.0).abs() < 1e-12);
// Scaled copy
assert!((tt2.evaluate(&[0, 0]).unwrap() - 4.0).abs() < 1e-12);
Sourcepub fn reverse(&self) -> Self
pub fn reverse(&self) -> Self
Reverse the order of sites in the tensor train.
The reversed TT satisfies reversed.evaluate(&[i_{L-1}, ..., i_0]) == original.evaluate(&[i_0, ..., i_{L-1}]).
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain, Tensor3Ops, tensor3_zeros};
let mut t0 = tensor3_zeros::<f64>(1, 2, 1);
t0.set3(0, 0, 0, 1.0);
t0.set3(0, 1, 0, 2.0);
let mut t1 = tensor3_zeros::<f64>(1, 3, 1);
t1.set3(0, 0, 0, 10.0);
t1.set3(0, 1, 0, 20.0);
t1.set3(0, 2, 0, 30.0);
let tt = TensorTrain::new(vec![t0, t1]).unwrap();
let rev = tt.reverse();
assert_eq!(rev.site_dims(), vec![3, 2]);
// T[0, 1] = 1.0 * 10.0 = 10.0, reversed: T_rev[0, 1] should also be 10.0 (site 0->10, site 1->2)
// Original: T[1, 0] = 2.0 * 10.0 = 20.0
// Reversed: T_rev[0, 1] = 20.0
assert!((rev.evaluate(&[0, 1]).unwrap() - tt.evaluate(&[1, 0]).unwrap()).abs() < 1e-12);
Source§impl<T: TTScalar> TensorTrain<T>
impl<T: TTScalar> TensorTrain<T>
Sourcepub fn fulltensor(&self) -> (Vec<T>, Vec<usize>)
pub fn fulltensor(&self) -> (Vec<T>, Vec<usize>)
Materialize the tensor train as a full dense tensor.
Returns (data, shape) where data is a flat vector in column-major
order and shape is the site dimensions. The total number of elements
is prod(shape).
Warning: The full tensor can be extremely large for high-dimensional problems. Only use this for small tensors or debugging.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
let tt = TensorTrain::<f64>::constant(&[2, 3], 7.0);
let (data, shape) = tt.fulltensor();
assert_eq!(shape, vec![2, 3]);
assert_eq!(data.len(), 6);
// Every element should be 7.0
assert!(data.iter().all(|&v| (v - 7.0).abs() < 1e-12));
Source§impl<T: TTScalar> TensorTrain<T>
impl<T: TTScalar> TensorTrain<T>
Sourcepub fn partial_sum(&self, dims: &[usize]) -> Result<TensorTrain<T>>
pub fn partial_sum(&self, dims: &[usize]) -> Result<TensorTrain<T>>
Sum (trace out) selected site dimensions, returning a lower-order TT.
dims is a slice of 0-indexed site positions to sum over. The
remaining sites keep their original order. If all dimensions are
summed, the result is a 1-site TT wrapping the scalar total.
§Errors
Returns an error if any element of dims is out of range.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
// 3-site constant TT: T[i,j,k] = 1.0, dims = [2, 3, 4]
let tt = TensorTrain::<f64>::constant(&[2, 3, 4], 1.0);
// Sum over the middle site (index 1): result has dims [2, 4]
let summed = tt.partial_sum(&[1]).unwrap();
assert_eq!(summed.site_dims(), vec![2, 4]);
// Each remaining entry = 1.0 * 3 (summed over dim=3)
let val = summed.evaluate(&[0, 0]).unwrap();
assert!((val - 3.0).abs() < 1e-12);
Trait Implementations§
Source§impl<T: TTScalar> AbstractTensorTrain<T> for TensorTrain<T>
impl<T: TTScalar> AbstractTensorTrain<T> for TensorTrain<T>
Source§impl<T: TTScalar> Add for &TensorTrain<T>
impl<T: TTScalar> Add for &TensorTrain<T>
Source§impl<T: TTScalar> Add for TensorTrain<T>
impl<T: TTScalar> Add for TensorTrain<T>
Source§impl<T: Clone + TTScalar> Clone for TensorTrain<T>
impl<T: Clone + TTScalar> Clone for TensorTrain<T>
Source§fn clone(&self) -> TensorTrain<T>
fn clone(&self) -> TensorTrain<T>
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
source. Read moreSource§impl<T: TTScalar> Mul<T> for &TensorTrain<T>
impl<T: TTScalar> Mul<T> for &TensorTrain<T>
Source§impl<T: TTScalar> Mul<T> for TensorTrain<T>
impl<T: TTScalar> Mul<T> for TensorTrain<T>
Source§impl<T: TTScalar> Neg for &TensorTrain<T>
impl<T: TTScalar> Neg for &TensorTrain<T>
Source§impl<T: TTScalar> Neg for TensorTrain<T>
impl<T: TTScalar> Neg for TensorTrain<T>
Source§impl<T: TTScalar> Sub for &TensorTrain<T>
impl<T: TTScalar> Sub for &TensorTrain<T>
Auto Trait Implementations§
impl<T> Freeze for TensorTrain<T>
impl<T> RefUnwindSafe for TensorTrain<T>where
T: RefUnwindSafe,
impl<T> Send for TensorTrain<T>
impl<T> Sync for TensorTrain<T>
impl<T> Unpin for TensorTrain<T>where
T: Unpin,
impl<T> UnsafeUnpin for TensorTrain<T>
impl<T> UnwindSafe for TensorTrain<T>where
T: UnwindSafe,
Blanket Implementations§
§impl<Rhs, Lhs, Output> AddByRef<Rhs> for Lhs
impl<Rhs, Lhs, Output> AddByRef<Rhs> for Lhs
type Output = Output
fn add_by_ref(&self, rhs: &Rhs) -> <Lhs as AddByRef<Rhs>>::Output
§impl<Rhs, Lhs, Output> AddByRef<Rhs> for Lhs
impl<Rhs, Lhs, Output> AddByRef<Rhs> for Lhs
type Output = Output
fn add_by_ref(&self, rhs: &Rhs) -> <Lhs as AddByRef<Rhs>>::Output
§impl<U> As for U
impl<U> As for U
§fn as_<T>(self) -> Twhere
T: CastFrom<U>,
fn as_<T>(self) -> Twhere
T: CastFrom<U>,
self to type T. The semantics of numeric casting with the as operator are followed, so <T as As>::as_::<U> can be used in the same way as T as U for numeric conversions. Read moreSource§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for Twhere
T: Clone,
impl<T> CloneToUninit for Twhere
T: Clone,
§impl<T> DistributionExt for Twhere
T: ?Sized,
impl<T> DistributionExt for Twhere
T: ?Sized,
fn rand<T>(&self, rng: &mut (impl Rng + ?Sized)) -> Twhere
Self: Distribution<T>,
§impl<T> DistributionExt for Twhere
T: ?Sized,
impl<T> DistributionExt for Twhere
T: ?Sized,
fn rand<T>(&self, rng: &mut (impl Rng + ?Sized)) -> Twhere
Self: Distribution<T>,
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read moreSource§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more