pub trait AbstractTensorTrain<T: TTScalar>: Sized {
(14 methods: 3 required, 11 provided)
// Required methods
fn len(&self) -> usize;
fn site_tensor(&self, i: usize) -> &Tensor3<T>;
fn site_tensors(&self) -> &[Tensor3<T>];
// Provided methods
fn is_empty(&self) -> bool { ... }
fn link_dims(&self) -> Vec<usize> { ... }
fn link_dim(&self, i: usize) -> usize { ... }
fn site_dims(&self) -> Vec<usize> { ... }
fn site_dim(&self, i: usize) -> usize { ... }
fn rank(&self) -> usize { ... }
fn evaluate(&self, indices: &[LocalIndex]) -> Result<T> { ... }
fn sum(&self) -> T { ... }
fn norm2(&self) -> f64 { ... }
fn norm(&self) -> f64 { ... }
fn log_norm(&self) -> f64 { ... }
}

§Description
Common interface implemented by all tensor train representations.
Provides read-only access to site tensors plus derived operations:
evaluate, sum,
norm, and log_norm.
§Implementors
- TensorTrain – primary container
- SiteTensorTrain – center-canonical form
- VidalTensorTrain – Vidal form (after conversion to TensorTrain)
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
// TensorTrain implements AbstractTensorTrain.
let tt = TensorTrain::<f64>::constant(&[2, 3, 4], 1.0);
// Query structure
assert_eq!(tt.len(), 3);
assert!(!tt.is_empty());
assert_eq!(tt.site_dims(), vec![2, 3, 4]);
assert_eq!(tt.site_dim(1), 3);
assert_eq!(tt.link_dims(), vec![1, 1]);
// Evaluate, sum, and norm
let val = tt.evaluate(&[0, 0, 0]).unwrap();
assert!((val - 1.0).abs() < 1e-12);
let s = tt.sum();
assert!((s - 24.0).abs() < 1e-10);
let n = tt.norm();
assert!((n - 24.0_f64.sqrt()).abs() < 1e-10);

§Required Methods
fn site_tensor(&self, i: usize) -> &Tensor3<T>
Borrow the rank-3 core tensor at site i.
fn site_tensors(&self) -> &[Tensor3<T>]
Borrow all core tensors as a slice.
§Provided Methods
fn evaluate(&self, indices: &[LocalIndex]) -> Result<T>
Evaluate the tensor train at a given index set
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
// Constant TT: all values are 5.0
let tt = TensorTrain::<f64>::constant(&[3, 4], 5.0);
let val = tt.evaluate(&[1, 2]).unwrap();
assert!((val - 5.0).abs() < 1e-12);
// Wrong number of indices returns an error
assert!(tt.evaluate(&[0]).is_err());

fn sum(&self) -> T
Sum over all indices of the tensor train
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
// Constant TT with value 2.0 over 3×4 grid: sum = 2.0 * 3 * 4 = 24.0
let tt = TensorTrain::<f64>::constant(&[3, 4], 2.0);
let s = tt.sum();
assert!((s - 24.0).abs() < 1e-10);
// Zero TT sums to 0.0
let zero_tt = TensorTrain::<f64>::zeros(&[2, 3]);
assert!((zero_tt.sum() - 0.0).abs() < 1e-12);

fn norm2(&self) -> f64
Squared Frobenius norm: sum_i |T[i]|^2.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
// Constant TT: T[i,j] = 2.0 on a 3x4 grid
let tt = TensorTrain::<f64>::constant(&[3, 4], 2.0);
// norm^2 = 2^2 * 3 * 4 = 48
assert!((tt.norm2() - 48.0).abs() < 1e-10);

fn norm(&self) -> f64
Frobenius norm: sqrt(sum_i |T[i]|^2).
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
let tt = TensorTrain::<f64>::constant(&[3, 4], 2.0);
// norm = sqrt(48) ~ 6.928
assert!((tt.norm() - 48.0_f64.sqrt()).abs() < 1e-10);

fn log_norm(&self) -> f64
Logarithm of the Frobenius norm: ln(norm()).
This is more numerically stable than norm().ln() for tensor trains
with very large or very small norms, because it normalizes at each
contraction step to avoid overflow/underflow.
Returns f64::NEG_INFINITY for zero tensor trains.
§Examples
use tensor4all_simplett::{TensorTrain, AbstractTensorTrain};
let tt = TensorTrain::<f64>::constant(&[3, 4], 2.0);
let log_n = tt.log_norm();
assert!((log_n - tt.norm().ln()).abs() < 1e-10);
// Zero TT returns negative infinity
let zero_tt = TensorTrain::<f64>::zeros(&[2, 3]);
assert_eq!(zero_tt.log_norm(), f64::NEG_INFINITY);

§Dyn Compatibility
This trait is not dyn compatible.
In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.