// tensor4all_core/tensor_like.rs

1//! TensorLike trait for unifying tensor types.
2//!
3//! This module provides a fully generic trait for tensor-like objects that expose
4//! external indices and support contraction operations.
5//!
6//! # Design
7//!
8//! The trait is **fully generic** (monomorphic), meaning:
9//! - No trait objects (`dyn TensorLike`)
10//! - Uses associated type for `Index`
11//! - All methods return `Self` instead of concrete types
12//!
13//! For heterogeneous tensor collections, use an enum wrapper.
14
15use crate::any_scalar::AnyScalar;
16use crate::tensor_index::TensorIndex;
17use crate::truncation::SvdTruncationPolicy;
18use anyhow::Result;
19use std::fmt::Debug;
20
21// ============================================================================
22// Factorization types (non-generic, algorithm-specific)
23// ============================================================================
24
25use thiserror::Error;
26
/// Error type for factorize operations.
///
/// Each variant corresponds to a distinct failure mode of the factorization
/// pipeline; variants wrapping foreign errors use `#[from]` so `?` converts
/// them automatically.
///
/// # Examples
///
/// ```
/// use tensor4all_core::{factorize, Canonical, DynIndex, FactorizeOptions, TensorDynLen};
///
/// let i = DynIndex::new_dyn(3);
/// let j = DynIndex::new_dyn(3);
/// let data: Vec<f64> = (0..9).map(|x| x as f64).collect();
/// let tensor = TensorDynLen::from_dense(vec![i.clone(), j.clone()], data).unwrap();
///
/// // QR with Canonical::Right is not supported
/// let result = factorize(
///     &tensor,
///     &[i],
///     &FactorizeOptions::qr().with_canonical(Canonical::Right),
/// );
/// assert!(result.is_err());
/// ```
#[derive(Debug, Error)]
pub enum FactorizeError {
    /// Factorization computation failed.
    #[error("Factorization failed: {0}")]
    ComputationError(
        /// The underlying error
        #[from]
        anyhow::Error,
    ),
    /// Invalid relative tolerance value (must be finite and non-negative).
    #[error("Invalid rtol value: {0}. rtol must be finite and non-negative.")]
    InvalidRtol(
        /// The invalid rtol value
        f64,
    ),
    /// Invalid algorithm-specific option combination.
    #[error("Invalid factorize options: {0}")]
    InvalidOptions(
        /// Description of the invalid option combination
        &'static str,
    ),
    /// The storage type is not supported for this operation.
    #[error("Unsupported storage type: {0}")]
    UnsupportedStorage(
        /// Description of the unsupported storage type
        &'static str,
    ),
    /// The canonical direction is not supported for this algorithm.
    #[error("Unsupported canonical direction for this algorithm: {0}")]
    UnsupportedCanonical(
        /// Description of the unsupported canonical direction
        &'static str,
    ),
    /// Error from SVD operation.
    #[error("SVD error: {0}")]
    SvdError(
        /// The underlying SVD error
        #[from]
        crate::svd::SvdError,
    ),
    /// Error from QR operation.
    #[error("QR error: {0}")]
    QrError(
        /// The underlying QR error
        #[from]
        crate::qr::QrError,
    ),
    /// Error from matrix CI operation.
    #[error("Matrix CI error: {0}")]
    MatrixCIError(
        /// The underlying matrix CI error
        #[from]
        tensor4all_tcicore::MatrixCIError,
    ),
}
102
/// Factorization algorithm.
///
/// Determines which matrix decomposition is used by [`TensorLike::factorize`].
///
/// # Examples
///
/// ```
/// use tensor4all_core::FactorizeAlg;
///
/// // Default is SVD
/// assert_eq!(FactorizeAlg::default(), FactorizeAlg::SVD);
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum FactorizeAlg {
    /// Singular Value Decomposition.
    #[default]
    SVD,
    /// QR decomposition.
    QR,
    /// Rank-revealing LU decomposition.
    LU,
    /// Cross Interpolation (LU-based).
    CI,
}
127
/// Canonical direction for factorization.
///
/// This determines which factor is "canonical" (orthogonal for SVD/QR,
/// or unit-diagonal for LU/CI).
///
/// # Examples
///
/// ```
/// use tensor4all_core::{factorize, Canonical, DynIndex, FactorizeOptions, TensorDynLen};
///
/// let i = DynIndex::new_dyn(3);
/// let j = DynIndex::new_dyn(3);
/// let data: Vec<f64> = (0..9).map(|x| x as f64).collect();
/// let tensor = TensorDynLen::from_dense(vec![i.clone(), j.clone()], data).unwrap();
///
/// // Left canonical: left factor has orthonormal columns
/// let left_result = factorize(
///     &tensor,
///     &[i.clone()],
///     &FactorizeOptions::svd().with_canonical(Canonical::Left),
/// ).unwrap();
///
/// // Right canonical: right factor has orthonormal rows
/// let right_result = factorize(
///     &tensor,
///     &[i.clone()],
///     &FactorizeOptions::svd().with_canonical(Canonical::Right),
/// ).unwrap();
///
/// // Both recover the same tensor
/// let recovered_left = left_result.left.contract(&left_result.right);
/// let recovered_right = right_result.left.contract(&right_result.right);
/// assert!(recovered_left.distance(&recovered_right) < 1e-12);
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Canonical {
    /// Left factor is canonical.
    /// - SVD: L=U (orthogonal), R=S*V
    /// - QR: L=Q (orthogonal), R=R
    /// - LU/CI: L has unit diagonal
    #[default]
    Left,
    /// Right factor is canonical.
    /// - SVD: L=U*S, R=V (orthogonal)
    /// - QR: Not supported (would need LQ)
    /// - LU/CI: U has unit diagonal
    Right,
}
176
/// Options for tensor factorization.
///
/// Controls the algorithm, canonical direction, and truncation parameters
/// for [`TensorLike::factorize`].
///
/// # Defaults
///
/// - Algorithm: SVD
/// - Canonical: Left (left factor is orthogonal)
/// - max_rank: `None` (no rank limit)
/// - svd_policy: `None` (uses the SVD global default policy)
/// - qr_rtol: `None` (uses the QR global default tolerance)
///
/// # Field Interactions
///
/// - `svd_policy` is only valid for `FactorizeAlg::SVD`.
/// - `qr_rtol` is only valid for `FactorizeAlg::QR`.
/// - `max_rank` is independent of the algorithm-specific tolerance settings.
///
/// Use [`FactorizeOptions::validate`] to check these interactions before
/// dispatching.
///
/// # Examples
///
/// ```
/// use tensor4all_core::{
///     factorize, Canonical, DynIndex, FactorizeOptions, TensorDynLen,
/// };
///
/// let i = DynIndex::new_dyn(4);
/// let j = DynIndex::new_dyn(4);
/// let mut data = vec![0.0_f64; 16];
/// data[0] = 1.0;  // rank-1 matrix
/// let tensor = TensorDynLen::from_dense(vec![i.clone(), j.clone()], data).unwrap();
///
/// // SVD with an explicit policy
/// let opts = FactorizeOptions::svd()
///     .with_svd_policy(tensor4all_core::SvdTruncationPolicy::new(1e-10));
/// let result = factorize(&tensor, &[i.clone()], &opts).unwrap();
/// assert_eq!(result.rank, 1);
///
/// // QR with max-rank truncation
/// let opts = FactorizeOptions::qr().with_qr_rtol(1e-8).with_max_rank(2);
/// let result = factorize(&tensor, &[i.clone()], &opts).unwrap();
/// assert!(result.rank <= 2);
/// ```
#[derive(Debug, Clone)]
pub struct FactorizeOptions {
    /// Factorization algorithm to use.
    pub alg: FactorizeAlg,
    /// Canonical direction.
    pub canonical: Canonical,
    /// Maximum rank for truncation.
    /// If `None`, no rank limit is applied.
    pub max_rank: Option<usize>,
    /// SVD truncation policy.
    /// If `None`, uses the SVD global default.
    pub svd_policy: Option<SvdTruncationPolicy>,
    /// QR-specific relative tolerance.
    /// If `None`, uses the QR global default.
    pub qr_rtol: Option<f64>,
}
236
237impl Default for FactorizeOptions {
238    fn default() -> Self {
239        Self {
240            alg: FactorizeAlg::SVD,
241            canonical: Canonical::Left,
242            max_rank: None,
243            svd_policy: None,
244            qr_rtol: None,
245        }
246    }
247}
248
249impl FactorizeOptions {
250    /// Create options for SVD factorization.
251    ///
252    /// # Examples
253    ///
254    /// ```
255    /// use tensor4all_core::{FactorizeAlg, FactorizeOptions};
256    ///
257    /// let opts = FactorizeOptions::svd();
258    /// assert_eq!(opts.alg, FactorizeAlg::SVD);
259    /// ```
260    pub fn svd() -> Self {
261        Self {
262            alg: FactorizeAlg::SVD,
263            ..Default::default()
264        }
265    }
266
267    /// Create options for QR factorization.
268    ///
269    /// # Examples
270    ///
271    /// ```
272    /// use tensor4all_core::{FactorizeAlg, FactorizeOptions};
273    ///
274    /// let opts = FactorizeOptions::qr();
275    /// assert_eq!(opts.alg, FactorizeAlg::QR);
276    /// ```
277    pub fn qr() -> Self {
278        Self {
279            alg: FactorizeAlg::QR,
280            ..Default::default()
281        }
282    }
283
284    /// Create options for LU factorization.
285    ///
286    /// # Examples
287    ///
288    /// ```
289    /// use tensor4all_core::{FactorizeAlg, FactorizeOptions};
290    ///
291    /// let opts = FactorizeOptions::lu();
292    /// assert_eq!(opts.alg, FactorizeAlg::LU);
293    /// ```
294    pub fn lu() -> Self {
295        Self {
296            alg: FactorizeAlg::LU,
297            ..Default::default()
298        }
299    }
300
301    /// Create options for CI factorization.
302    ///
303    /// # Examples
304    ///
305    /// ```
306    /// use tensor4all_core::{FactorizeAlg, FactorizeOptions};
307    ///
308    /// let opts = FactorizeOptions::ci();
309    /// assert_eq!(opts.alg, FactorizeAlg::CI);
310    /// ```
311    pub fn ci() -> Self {
312        Self {
313            alg: FactorizeAlg::CI,
314            ..Default::default()
315        }
316    }
317
318    /// Set canonical direction.
319    ///
320    /// # Examples
321    ///
322    /// ```
323    /// use tensor4all_core::{Canonical, FactorizeOptions};
324    ///
325    /// let opts = FactorizeOptions::svd().with_canonical(Canonical::Right);
326    /// assert_eq!(opts.canonical, Canonical::Right);
327    /// ```
328    pub fn with_canonical(mut self, canonical: Canonical) -> Self {
329        self.canonical = canonical;
330        self
331    }
332
333    /// Set the SVD truncation policy.
334    ///
335    /// # Examples
336    ///
337    /// ```
338    /// use tensor4all_core::{FactorizeOptions, SvdTruncationPolicy};
339    ///
340    /// let opts = FactorizeOptions::svd().with_svd_policy(SvdTruncationPolicy::new(1e-8));
341    /// assert_eq!(opts.svd_policy, Some(SvdTruncationPolicy::new(1e-8)));
342    /// ```
343    pub fn with_svd_policy(mut self, policy: SvdTruncationPolicy) -> Self {
344        self.svd_policy = Some(policy);
345        self
346    }
347
348    /// Set the QR-specific relative tolerance.
349    ///
350    /// # Examples
351    ///
352    /// ```
353    /// use tensor4all_core::FactorizeOptions;
354    ///
355    /// let opts = FactorizeOptions::qr().with_qr_rtol(1e-8);
356    /// assert_eq!(opts.qr_rtol, Some(1e-8));
357    /// ```
358    pub fn with_qr_rtol(mut self, rtol: f64) -> Self {
359        self.qr_rtol = Some(rtol);
360        self
361    }
362
363    /// Set maximum rank.
364    ///
365    /// # Examples
366    ///
367    /// ```
368    /// use tensor4all_core::FactorizeOptions;
369    ///
370    /// let opts = FactorizeOptions::svd().with_max_rank(10);
371    /// assert_eq!(opts.max_rank, Some(10));
372    /// ```
373    pub fn with_max_rank(mut self, max_rank: usize) -> Self {
374        self.max_rank = Some(max_rank);
375        self
376    }
377
378    /// Validate that the selected fields make sense for the chosen algorithm.
379    ///
380    /// # Errors
381    ///
382    /// Returns [`FactorizeError::InvalidOptions`] if an algorithm is paired with
383    /// unsupported algorithm-specific truncation settings.
384    pub fn validate(&self) -> std::result::Result<(), FactorizeError> {
385        match self.alg {
386            FactorizeAlg::SVD => {
387                if self.qr_rtol.is_some() {
388                    return Err(FactorizeError::InvalidOptions(
389                        "SVD factorization does not accept qr_rtol",
390                    ));
391                }
392            }
393            FactorizeAlg::QR => {
394                if self.svd_policy.is_some() {
395                    return Err(FactorizeError::InvalidOptions(
396                        "QR factorization does not accept svd_policy",
397                    ));
398                }
399            }
400            FactorizeAlg::LU | FactorizeAlg::CI => {
401                if self.svd_policy.is_some() {
402                    return Err(FactorizeError::InvalidOptions(
403                        "LU/CI factorization does not accept svd_policy",
404                    ));
405                }
406                if self.qr_rtol.is_some() {
407                    return Err(FactorizeError::InvalidOptions(
408                        "LU/CI factorization does not accept qr_rtol",
409                    ));
410                }
411            }
412        }
413
414        Ok(())
415    }
416}
417
/// Result of tensor factorization.
///
/// Contains the two factors, the bond index connecting them, and metadata
/// about the decomposition. The original tensor can be recovered (up to
/// truncation error) by contracting `left` and `right` along `bond_index`.
///
/// # Examples
///
/// ```
/// use tensor4all_core::{factorize, DynIndex, FactorizeOptions, TensorDynLen};
///
/// let i = DynIndex::new_dyn(3);
/// let j = DynIndex::new_dyn(4);
/// let data: Vec<f64> = (0..12).map(|x| x as f64).collect();
/// let tensor = TensorDynLen::from_dense(vec![i.clone(), j.clone()], data).unwrap();
///
/// let result = factorize(&tensor, &[i.clone()], &FactorizeOptions::svd()).unwrap();
///
/// // Contracting left * right recovers the original tensor
/// let recovered = result.left.contract(&result.right);
/// assert!(tensor.distance(&recovered) < 1e-12);
///
/// // SVD provides singular values
/// assert!(result.singular_values.is_some());
/// assert_eq!(result.singular_values.as_ref().unwrap().len(), result.rank);
/// ```
#[derive(Debug, Clone)]
pub struct FactorizeResult<T: TensorLike> {
    /// Left factor tensor.
    pub left: T,
    /// Right factor tensor.
    pub right: T,
    /// Bond index connecting left and right factors.
    pub bond_index: T::Index,
    /// Singular values (only for SVD; `None` for QR/LU/CI).
    pub singular_values: Option<Vec<f64>>,
    /// Rank of the factorization (the dimension of `bond_index`).
    pub rank: usize,
}
457
458// ============================================================================
459// Contraction types
460// ============================================================================
461
/// Specifies which tensor pairs are allowed to contract.
///
/// This enum controls which tensor pairs can have their indices contracted
/// in multi-tensor contraction operations. This is useful for tensor networks
/// where the graph structure determines which tensors are connected.
///
/// # Example
///
/// ```
/// use tensor4all_core::{AllowedPairs, DynIndex, TensorDynLen, TensorLike};
///
/// # fn main() -> anyhow::Result<()> {
/// let i = DynIndex::new_dyn(2);
/// let j = DynIndex::new_dyn(2);
/// let k = DynIndex::new_dyn(2);
///
/// let a = TensorDynLen::from_dense(
///     vec![i.clone(), j.clone()],
///     vec![1.0, 0.0, 0.0, 1.0],
/// )?;
/// let b = TensorDynLen::from_dense(
///     vec![j.clone(), k.clone()],
///     vec![1.0, 2.0, 3.0, 4.0],
/// )?;
/// let c = TensorDynLen::from_dense(vec![k.clone()], vec![1.0, 10.0])?;
///
/// let tensor_refs: Vec<&TensorDynLen> = vec![&a, &b, &c];
/// let all = <TensorDynLen as TensorLike>::contract(&tensor_refs, AllowedPairs::All)?;
///
/// let edges = vec![(0, 1), (1, 2)];
/// let specified =
///     <TensorDynLen as TensorLike>::contract(&tensor_refs, AllowedPairs::Specified(&edges))?;
///
/// assert_eq!(all.dims(), vec![2]);
/// assert!(all.sub(&specified)?.maxabs() < 1e-12);
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone, Copy)]
pub enum AllowedPairs<'a> {
    /// All tensor pairs are allowed to contract.
    ///
    /// Indices with matching IDs across any two tensors will be contracted.
    /// This is the default behavior, equivalent to ITensor's `*` operator.
    All,
    /// Only specified tensor pairs are allowed to contract.
    ///
    /// Each pair is `(tensor_idx_a, tensor_idx_b)` into the input tensor slice.
    /// Indices are only contracted if they belong to an allowed pair.
    ///
    /// This is useful for tensor networks where the graph structure
    /// determines which tensors are connected (e.g., TreeTN edges).
    Specified(&'a [(usize, usize)]),
}
516
/// Linearization order used when fusing or unfusing multiple logical indices
/// into one physical index.
///
/// This matters for exact reshape-style operations such as replacing one fused
/// index with several unfused indices.
///
/// # Examples
///
/// ```
/// use tensor4all_core::LinearizationOrder;
///
/// assert_eq!(LinearizationOrder::ColumnMajor.as_str(), "column-major");
/// assert_eq!(LinearizationOrder::RowMajor.as_str(), "row-major");
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LinearizationOrder {
    /// First index changes fastest (Fortran/Julia-style ordering).
    ColumnMajor,
    /// Last index changes fastest (C-style ordering).
    RowMajor,
}
538
539impl LinearizationOrder {
540    /// Return a short human-readable description.
541    ///
542    /// # Examples
543    ///
544    /// ```
545    /// use tensor4all_core::LinearizationOrder;
546    ///
547    /// assert_eq!(LinearizationOrder::ColumnMajor.as_str(), "column-major");
548    /// ```
549    pub fn as_str(self) -> &'static str {
550        match self {
551            Self::ColumnMajor => "column-major",
552            Self::RowMajor => "row-major",
553        }
554    }
555}
556
557// ============================================================================
558// TensorLike trait (fully generic)
559// ============================================================================
560
561/// Trait for tensor-like objects that expose external indices and support contraction.
562///
563/// This trait is **fully generic** (monomorphic), meaning it does not support
564/// trait objects (`dyn TensorLike`). For heterogeneous tensor collections,
565/// use an enum wrapper instead.
566///
567/// # Design Principles
568///
569/// - **Minimal interface**: Only external indices and automatic contraction
570/// - **Fully generic**: Uses associated type for `Index`, returns `Self`
571/// - **Stable ordering**: `external_indices()` returns indices in deterministic order
572/// - **No trait objects**: Requires `Sized`, cannot use `dyn TensorLike`
573///
574/// # Example
575///
576/// ```
577/// use tensor4all_core::{AllowedPairs, DynIndex, TensorDynLen, TensorLike};
578///
579/// fn contract_pair(a: &TensorDynLen, b: &TensorDynLen) -> anyhow::Result<TensorDynLen> {
580///     Ok(<TensorDynLen as TensorLike>::contract(&[a, b], AllowedPairs::All)?)
581/// }
582///
583/// # fn main() -> anyhow::Result<()> {
584/// let i = DynIndex::new_dyn(2);
585/// let j = DynIndex::new_dyn(2);
586/// let a = TensorDynLen::from_dense(
587///     vec![i.clone(), j.clone()],
588///     vec![1.0, 0.0, 0.0, 1.0],
589/// )?;
590/// let b = TensorDynLen::from_dense(vec![j.clone()], vec![2.0, 3.0])?;
591///
592/// let result = contract_pair(&a, &b)?;
593/// assert_eq!(result.to_vec::<f64>()?, vec![2.0, 3.0]);
594/// # Ok(())
595/// # }
596/// ```
597///
598/// # Heterogeneous Collections
599///
600/// For mixing different tensor types, define an enum:
601///
602/// ```
603/// use tensor4all_core::{block_tensor::BlockTensor, DynIndex, TensorDynLen};
604///
605/// let i = DynIndex::new_dyn(2);
606/// let dense = TensorDynLen::from_dense(vec![i.clone()], vec![1.0, 2.0]).unwrap();
607/// let block = BlockTensor::new(vec![dense.clone()], (1, 1));
608///
609/// enum TensorNetwork {
610///     Dense(TensorDynLen),
611///     Block(BlockTensor<TensorDynLen>),
612/// }
613///
614/// let network = TensorNetwork::Block(block);
615/// assert!(matches!(network, TensorNetwork::Block(_)));
616/// ```
617///
618/// # Supertrait
619///
620/// `TensorLike` extends `TensorIndex`, which provides:
621/// - `external_indices()` - Get all external indices
622/// - `num_external_indices()` - Count external indices
623/// - `replaceind()` / `replaceinds()` - Replace indices
624///
625/// This separation allows tensor networks (like `TreeTN`) to implement
626/// index operations without implementing contraction/factorization.
627pub trait TensorLike: TensorIndex {
628    /// Factorize this tensor into left and right factors.
629    ///
630    /// This function dispatches to the appropriate algorithm based on `options.alg`:
631    /// - `SVD`: Singular Value Decomposition
632    /// - `QR`: QR decomposition
633    /// - `LU`: Rank-revealing LU decomposition
634    /// - `CI`: Cross Interpolation
635    ///
636    /// The `canonical` option controls which factor is "canonical":
637    /// - `Canonical::Left`: Left factor is orthogonal (SVD/QR) or unit-diagonal (LU/CI)
638    /// - `Canonical::Right`: Right factor is orthogonal (SVD) or unit-diagonal (LU/CI)
639    ///
640    /// # Arguments
641    /// * `left_inds` - Indices to place on the left side
642    /// * `options` - Factorization options
643    ///
644    /// # Returns
645    /// A `FactorizeResult` containing the left and right factors, bond index,
646    /// singular values (for SVD), and rank.
647    ///
648    /// # Errors
649    /// Returns `FactorizeError` if:
650    /// - The storage type is not supported (only DenseF64 and DenseC64)
651    /// - QR is used with `Canonical::Right`
652    /// - The underlying algorithm fails
653    fn factorize(
654        &self,
655        left_inds: &[<Self as TensorIndex>::Index],
656        options: &FactorizeOptions,
657    ) -> std::result::Result<FactorizeResult<Self>, FactorizeError>;
658
659    /// Factorize this tensor without applying truncation controls.
660    ///
661    /// Use this for exact tensor rewrites such as canonicalization, where the
662    /// contracted factors must preserve the represented tensor up to numerical
663    /// roundoff. Unlike [`Self::factorize`], this method must not consult global
664    /// SVD/QR/LU truncation defaults or apply maximum-rank limits.
665    ///
666    /// # Arguments
667    /// * `left_inds` - Indices to place on the left side.
668    /// * `alg` - Decomposition algorithm to use.
669    /// * `canonical` - Which factor should carry the canonical form.
670    ///
671    /// # Returns
672    /// A `FactorizeResult` containing the left and right factors, bond index,
673    /// singular values for SVD, and retained exact numerical rank.
674    ///
675    /// # Errors
676    /// Returns [`FactorizeError`] if:
677    /// - the storage type is not supported,
678    /// - the requested canonical direction is unsupported for the algorithm, or
679    /// - the underlying decomposition fails.
680    ///
681    /// # Examples
682    ///
683    /// ```
684    /// use tensor4all_core::{
685    ///     Canonical, DynIndex, FactorizeAlg, TensorDynLen, TensorLike,
686    /// };
687    ///
688    /// let i = DynIndex::new_dyn(2);
689    /// let j = DynIndex::new_dyn(2);
690    /// let tensor = TensorDynLen::from_dense(
691    ///     vec![i.clone(), j.clone()],
692    ///     vec![1.0_f64, 0.0, 0.0, 1.0e-16],
693    /// )?;
694    ///
695    /// let result = tensor.factorize_full_rank(
696    ///     std::slice::from_ref(&i),
697    ///     FactorizeAlg::QR,
698    ///     Canonical::Left,
699    /// )?;
700    /// let reconstructed = result.left.contract(&result.right);
701    /// assert!((tensor - reconstructed).maxabs() < 1.0e-18);
702    /// # Ok::<(), Box<dyn std::error::Error>>(())
703    /// ```
704    fn factorize_full_rank(
705        &self,
706        left_inds: &[<Self as TensorIndex>::Index],
707        alg: FactorizeAlg,
708        canonical: Canonical,
709    ) -> std::result::Result<FactorizeResult<Self>, FactorizeError>;
710
711    /// Tensor conjugate operation.
712    ///
713    /// This is a generalized conjugate operation that depends on the tensor type:
714    /// - For dense tensors (TensorDynLen): element-wise complex conjugate
715    /// - For symmetric tensors: tensor conjugate considering symmetry sectors
716    ///
717    /// This operation is essential for computing inner products and overlaps
718    /// in tensor network algorithms like fitting.
719    ///
720    /// # Returns
721    /// A new tensor representing the tensor conjugate.
722    fn conj(&self) -> Self;
723
724    /// Direct sum of two tensors along specified index pairs.
725    ///
726    /// For tensors A and B with indices to be summed specified as pairs,
727    /// creates a new tensor C where each paired index has dimension = dim_A + dim_B.
728    /// Non-paired indices must match exactly between A and B (same ID).
729    ///
730    /// # Arguments
731    ///
732    /// * `other` - Second tensor
733    /// * `pairs` - Pairs of (self_index, other_index) to be summed. Each pair creates
734    ///   a new index in the result with dimension = dim(self_index) + dim(other_index).
735    ///
736    /// # Returns
737    ///
738    /// A `DirectSumResult` containing the result tensor and new indices created
739    /// for the summed dimensions (one per pair).
740    ///
741    /// # Example
742    ///
743    /// ```
744    /// use tensor4all_core::{DynIndex, TensorDynLen, TensorLike};
745    ///
746    /// # fn main() -> anyhow::Result<()> {
747    /// let j = DynIndex::new_dyn(2);
748    /// let k = DynIndex::new_dyn(3);
749    ///
750    /// let a = TensorDynLen::from_dense(vec![j.clone()], vec![1.0, 2.0])?;
751    /// let b = TensorDynLen::from_dense(vec![k.clone()], vec![3.0, 4.0, 5.0])?;
752    /// let result = a.direct_sum(&b, &[(j.clone(), k.clone())])?;
753    ///
754    /// assert_eq!(result.new_indices.len(), 1);
755    /// assert_eq!(result.tensor.dims(), vec![5]);
756    /// assert_eq!(result.tensor.to_vec::<f64>()?, vec![1.0, 2.0, 3.0, 4.0, 5.0]);
757    /// # Ok(())
758    /// # }
759    /// ```
760    fn direct_sum(
761        &self,
762        other: &Self,
763        pairs: &[(<Self as TensorIndex>::Index, <Self as TensorIndex>::Index)],
764    ) -> Result<DirectSumResult<Self>>;
765
766    /// Outer product (tensor product) of two tensors.
767    ///
768    /// Computes the tensor product of `self` and `other`, resulting in a tensor
769    /// with all indices from both tensors. No indices are contracted.
770    ///
771    /// # Arguments
772    ///
773    /// * `other` - The other tensor to compute outer product with
774    ///
775    /// # Returns
776    ///
777    /// A new tensor representing the outer product.
778    ///
779    /// # Errors
780    ///
781    /// Returns an error if the tensors have common indices (by ID).
782    /// Use `tensordot` for contraction when indices overlap.
783    fn outer_product(&self, other: &Self) -> Result<Self>;
784
785    /// Compute the squared Frobenius norm of the tensor.
786    ///
787    /// The squared Frobenius norm is defined as the sum of squared absolute values
788    /// of all tensor elements: `||T||_F^2 = sum_i |T_i|^2`.
789    ///
790    /// This is used for computing norms in tensor network algorithms,
791    /// convergence checks, and normalization.
792    ///
793    /// # Returns
794    /// The squared Frobenius norm as a non-negative f64.
795    fn norm_squared(&self) -> f64;
796
797    /// Permute tensor indices to match the specified order.
798    ///
799    /// This reorders the tensor's axes to match the order specified by `new_order`.
800    /// The indices in `new_order` are matched by ID with the tensor's current indices.
801    ///
802    /// # Arguments
803    ///
804    /// * `new_order` - The desired order of indices (matched by ID)
805    ///
806    /// # Returns
807    ///
808    /// A new tensor with permuted indices.
809    ///
810    /// # Errors
811    ///
812    /// Returns an error if:
813    /// - The number of indices doesn't match
814    /// - An index ID in `new_order` is not found in the tensor
815    fn permuteinds(&self, new_order: &[<Self as TensorIndex>::Index]) -> Result<Self>;
816
817    /// Fuse local tensor indices into one replacement index.
818    ///
819    /// This is a local axis fusion operation: it reshapes the tensor so
820    /// `old_indices` are replaced by `new_index`. Indices are matched by ID,
821    /// and `old_indices` must be non-empty. The order of `old_indices` defines
822    /// the fused coordinate linearization, while `order` defines how those
823    /// coordinates map to the replacement axis. `new_index.dim()` must equal
824    /// the product of the matched axis dimensions. The replacement index is
825    /// inserted at the earliest fused axis position, and the remaining indices
826    /// retain their relative order.
827    ///
828    /// Implementations should return `Err` if this operation is unsupported or
829    /// if exact local fusion cannot be represented by the tensor type.
830    ///
831    /// # Arguments
832    ///
833    /// * `old_indices` - Existing local indices to fuse, matched by ID. Must be non-empty.
834    /// * `new_index` - Replacement index whose dimension is the fused product.
835    /// * `order` - Linearization order for mapping old coordinates to the fused axis.
836    ///
837    /// # Returns
838    ///
839    /// A new tensor with `old_indices` replaced by `new_index`.
840    ///
841    /// # Errors
842    ///
843    /// Returns an error if:
844    /// - `old_indices` is empty
845    /// - Any requested index ID is missing from the tensor
846    /// - The replacement dimension does not match the product of fused axis dimensions
847    /// - The tensor type does not support local axis fusion
848    /// - Exact local fusion cannot be represented by the tensor type
849    ///
850    /// # Examples
851    ///
852    /// ```
853    /// use tensor4all_core::{
854    ///     DynIndex, IndexLike, LinearizationOrder, TensorDynLen, TensorLike,
855    /// };
856    ///
857    /// # fn main() -> anyhow::Result<()> {
858    /// let i = DynIndex::new_dyn(2);
859    /// let j = DynIndex::new_dyn(3);
860    /// let k = DynIndex::new_dyn(2);
861    /// let fused = DynIndex::new_link(6)?;
862    /// let data: Vec<f64> = (0..12).map(|value| value as f64).collect();
863    /// let tensor = TensorDynLen::from_dense(vec![i.clone(), j.clone(), k.clone()], data)?;
864    ///
865    /// let fused_tensor = <TensorDynLen as TensorLike>::fuse_indices(
866    ///     &tensor,
867    ///     &[j.clone(), i.clone()],
868    ///     fused.clone(),
869    ///     LinearizationOrder::ColumnMajor,
870    /// )?;
871    ///
872    /// assert_eq!(fused_tensor.indices(), &[fused.clone(), k.clone()]);
873    /// assert_eq!(fused_tensor.dims(), vec![6, 2]);
874    ///
875    /// let roundtrip = fused_tensor
876    ///     .unfuse_index(&fused, &[j, i], LinearizationOrder::ColumnMajor)?
877    ///     .permuteinds(tensor.indices())?;
878    /// assert!(roundtrip.isapprox(&tensor, 1e-12, 0.0));
879    /// # Ok(())
880    /// # }
881    /// ```
882    fn fuse_indices(
883        &self,
884        old_indices: &[<Self as TensorIndex>::Index],
885        new_index: <Self as TensorIndex>::Index,
886        order: LinearizationOrder,
887    ) -> Result<Self>;
888
    /// Contract multiple tensors over their contractable indices.
    ///
    /// This method contracts 2 or more tensors. Pairs of indices that satisfy
    /// `is_contractable()` (same ID, same dimension, compatible ConjState)
    /// are contracted based on the `allowed` parameter.
    ///
    /// Handles disconnected tensor graphs automatically by:
    /// 1. Finding connected components based on contractable indices
    /// 2. Contracting each connected component separately
    /// 3. Combining results using outer product
    ///
    /// # Arguments
    ///
    /// * `tensors` - Slice of tensor references to contract (must have length >= 1)
    /// * `allowed` - Specifies which tensor pairs can have their indices contracted:
    ///   - `AllowedPairs::All`: Contract all contractable index pairs (default behavior)
    ///   - `AllowedPairs::Specified(&[(i, j)])`: Only contract indices between the
    ///     specified tensor pairs, where `i` and `j` are zero-based positions into
    ///     `tensors`
    ///
    /// # Returns
    ///
    /// A new tensor representing the contracted result.
    /// If tensors form disconnected components, they are combined via outer product.
    ///
    /// # Behavior by N
    /// - N=0: Error
    /// - N=1: Clone of input
    /// - N>=2: Contract connected components, combine with outer product
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - No tensors are provided
    /// - `AllowedPairs::Specified` contains a pair with no contractable indices
    ///
    /// # Example
    ///
    /// ```
    /// use tensor4all_core::{AllowedPairs, DynIndex, TensorDynLen, TensorLike};
    ///
    /// # fn main() -> anyhow::Result<()> {
    /// let i = DynIndex::new_dyn(2);
    /// let j = DynIndex::new_dyn(2);
    /// let k = DynIndex::new_dyn(2);
    ///
    /// let a = TensorDynLen::from_dense(
    ///     vec![i.clone(), j.clone()],
    ///     vec![1.0, 0.0, 0.0, 1.0],
    /// )?;
    /// let b = TensorDynLen::from_dense(
    ///     vec![j.clone(), k.clone()],
    ///     vec![1.0, 2.0, 3.0, 4.0],
    /// )?;
    /// let c = TensorDynLen::from_dense(vec![k.clone()], vec![1.0, 10.0])?;
    ///
    /// let all = <TensorDynLen as TensorLike>::contract(&[&a, &b, &c], AllowedPairs::All)?;
    /// let specified = <TensorDynLen as TensorLike>::contract(
    ///     &[&a, &b, &c],
    ///     AllowedPairs::Specified(&[(0, 1), (1, 2)]),
    /// )?;
    ///
    /// assert_eq!(all.dims(), vec![2]);
    /// assert!(all.sub(&specified)?.maxabs() < 1e-12);
    /// # Ok(())
    /// # }
    /// ```
    fn contract(tensors: &[&Self], allowed: AllowedPairs<'_>) -> Result<Self>;
955
    /// Contract multiple tensors that must form a connected graph.
    ///
    /// This is the core contraction method that requires all tensors to be
    /// connected through contractable indices. Use [`Self::contract`] if you want
    /// automatic handling of disconnected components via outer product.
    ///
    /// # Arguments
    ///
    /// * `tensors` - Slice of tensor references to contract (must form a connected graph)
    /// * `allowed` - Specifies which tensor pairs can have their indices contracted
    ///   (pairs in `AllowedPairs::Specified` are zero-based positions into `tensors`)
    ///
    /// # Returns
    ///
    /// A new tensor representing the contracted result.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - No tensors are provided
    /// - The tensors form a disconnected graph
    ///
    /// # Example
    ///
    /// ```
    /// use tensor4all_core::{AllowedPairs, DynIndex, TensorDynLen, TensorLike};
    ///
    /// # fn main() -> anyhow::Result<()> {
    /// let i = DynIndex::new_dyn(2);
    /// let j = DynIndex::new_dyn(2);
    /// let k = DynIndex::new_dyn(2);
    ///
    /// let a = TensorDynLen::from_dense(
    ///     vec![i.clone(), j.clone()],
    ///     vec![1.0, 0.0, 0.0, 1.0],
    /// )?;
    /// let b = TensorDynLen::from_dense(
    ///     vec![j.clone(), k.clone()],
    ///     vec![1.0, 2.0, 3.0, 4.0],
    /// )?;
    /// let c = TensorDynLen::from_dense(vec![k.clone()], vec![1.0, 10.0])?;
    ///
    /// let result = TensorDynLen::contract_connected(&[&a, &b, &c], AllowedPairs::All)?;
    /// assert_eq!(result.dims(), vec![2]);
    /// # Ok(())
    /// # }
    /// ```
    fn contract_connected(tensors: &[&Self], allowed: AllowedPairs<'_>) -> Result<Self>;
1003
1004    // ========================================================================
1005    // Vector space operations (for Krylov solvers)
1006    // ========================================================================
1007
    /// Compute a linear combination: `a * self + b * other`.
    ///
    /// This is the fundamental vector space operation; element-wise helpers
    /// such as [`Self::sub`] are built on top of it. The indices of `other`
    /// are automatically permuted to match `self`'s order (see [`Self::sub`]).
    ///
    /// # Errors
    ///
    /// Returns an error if the two tensors are incompatible
    /// (implementation-defined; e.g. mismatched index sets).
    fn axpby(&self, a: AnyScalar, other: &Self, b: AnyScalar) -> Result<Self>;
1012
    /// Scalar multiplication: `scalar * self`.
    ///
    /// Multiplies every element of the tensor by `scalar`
    /// (e.g. [`Self::neg`] is `scale(-1.0)`).
    fn scale(&self, scalar: AnyScalar) -> Result<Self>;
1015
    /// Inner product (dot product) of two tensors.
    ///
    /// Computes `⟨self, other⟩ = Σ conj(self)_i * other_i`, i.e. the product
    /// is conjugate-linear in `self` and linear in `other`.
    fn inner_product(&self, other: &Self) -> Result<AnyScalar>;
1020
1021    /// Compute the Frobenius norm of the tensor.
1022    fn norm(&self) -> f64 {
1023        self.norm_squared().sqrt()
1024    }
1025
    /// Maximum absolute value of all elements (L-infinity norm).
    ///
    /// Handy for tolerance checks on differences, e.g.
    /// `a.sub(&b)?.maxabs() < 1e-12`.
    fn maxabs(&self) -> f64;
1028
1029    /// Element-wise subtraction: `self - other`.
1030    ///
1031    /// Indices are automatically permuted to match `self`'s order via `axpby`.
1032    ///
1033    /// # Examples
1034    ///
1035    /// ```
1036    /// use tensor4all_core::{DynIndex, TensorDynLen, TensorLike};
1037    ///
1038    /// let i = DynIndex::new_dyn(2);
1039    /// let a = TensorDynLen::from_dense(vec![i.clone()], vec![5.0, 3.0]).unwrap();
1040    /// let b = TensorDynLen::from_dense(vec![i.clone()], vec![1.0, 1.0]).unwrap();
1041    /// let diff = a.sub(&b).unwrap();
1042    /// assert_eq!(diff.to_vec::<f64>().unwrap(), vec![4.0, 2.0]);
1043    /// ```
1044    fn sub(&self, other: &Self) -> Result<Self> {
1045        self.axpby(AnyScalar::new_real(1.0), other, AnyScalar::new_real(-1.0))
1046    }
1047
1048    /// Negate all elements: `-self`.
1049    fn neg(&self) -> Result<Self> {
1050        self.scale(AnyScalar::new_real(-1.0))
1051    }
1052
1053    /// Approximate equality check (Julia `isapprox` semantics).
1054    ///
1055    /// Returns `true` if `||self - other|| <= max(atol, rtol * max(||self||, ||other||))`.
1056    ///
1057    /// # Examples
1058    ///
1059    /// ```
1060    /// use tensor4all_core::{DynIndex, TensorDynLen, TensorLike};
1061    ///
1062    /// let i = DynIndex::new_dyn(3);
1063    /// let a = TensorDynLen::from_dense(vec![i.clone()], vec![1.0, 2.0, 3.0]).unwrap();
1064    /// let b = TensorDynLen::from_dense(vec![i.clone()], vec![1.0, 2.0, 3.0]).unwrap();
1065    /// assert!(a.isapprox(&b, 1e-12, 0.0));
1066    ///
1067    /// let c = TensorDynLen::from_dense(vec![i.clone()], vec![1.1, 2.0, 3.0]).unwrap();
1068    /// assert!(!a.isapprox(&c, 1e-3, 0.0));
1069    /// ```
1070    fn isapprox(&self, other: &Self, atol: f64, rtol: f64) -> bool {
1071        let diff = match self.sub(other) {
1072            Ok(d) => d,
1073            Err(_) => return false,
1074        };
1075        let diff_norm = diff.norm();
1076        diff_norm <= atol.max(rtol * self.norm().max(other.norm()))
1077    }
1078
    /// Validate structural consistency of this tensor.
    ///
    /// The default implementation does nothing (always succeeds).
    /// Types with internal structure (for example, block-sparse tensors) can override
    /// this to check invariants such as index sharing between blocks.
    ///
    /// # Errors
    ///
    /// The default implementation never fails; overriding implementations
    /// return an error when an internal invariant is violated.
    fn validate(&self) -> Result<()> {
        Ok(())
    }
1087
    /// Create a diagonal (Kronecker delta) tensor for a single index pair.
    ///
    /// Creates a 2D tensor `T[i, o]` where `T[i, o] = δ_{i,o}` (1 if i==o, 0 otherwise).
    ///
    /// # Arguments
    ///
    /// * `input_index` - Input index
    /// * `output_index` - Output index (must have same dimension as input)
    ///
    /// # Returns
    ///
    /// A 2D tensor with shape `[dim, dim]` representing the identity matrix.
    /// Its indices are ordered `[input_index, output_index]`.
    ///
    /// # Errors
    ///
    /// Returns an error if dimensions don't match.
    ///
    /// # Examples
    ///
    /// ```
    /// use tensor4all_core::{DynIndex, TensorDynLen, TensorLike};
    ///
    /// let i = DynIndex::new_dyn(3);
    /// let o = DynIndex::new_dyn(3);
    /// let delta = TensorDynLen::diagonal(&i, &o).unwrap();
    ///
    /// assert_eq!(delta.dims(), vec![3, 3]);
    /// let data = delta.to_vec::<f64>().unwrap();
    /// // Identity matrix in column-major: [1,0,0, 0,1,0, 0,0,1]
    /// assert!((data[0] - 1.0).abs() < 1e-12);
    /// assert!((data[4] - 1.0).abs() < 1e-12);
    /// assert!((data[8] - 1.0).abs() < 1e-12);
    /// assert!((data[1]).abs() < 1e-12);
    /// ```
    fn diagonal(
        input_index: &<Self as TensorIndex>::Index,
        output_index: &<Self as TensorIndex>::Index,
    ) -> Result<Self>;
1126
1127    /// Create a delta (identity) tensor as outer product of diagonals.
1128    ///
1129    /// For paired indices `(i1, o1), (i2, o2), ...`, creates a tensor where:
1130    /// `T[i1, o1, i2, o2, ...] = δ_{i1,o1} × δ_{i2,o2} × ...`
1131    ///
1132    /// This is computed as the outer product of individual diagonal tensors.
1133    ///
1134    /// # Arguments
1135    ///
1136    /// * `input_indices` - Input indices
1137    /// * `output_indices` - Output indices (must have same length and matching dimensions)
1138    ///
1139    /// # Returns
1140    ///
1141    /// A tensor representing the identity operator on the given index space.
1142    ///
1143    /// # Errors
1144    ///
1145    /// Returns an error if:
1146    /// - Number of input and output indices don't match
1147    /// - Dimensions of paired indices don't match
1148    ///
1149    /// # Examples
1150    ///
1151    /// ```
1152    /// use tensor4all_core::{DynIndex, TensorDynLen, TensorLike};
1153    ///
1154    /// let i1 = DynIndex::new_dyn(2);
1155    /// let o1 = DynIndex::new_dyn(2);
1156    /// let i2 = DynIndex::new_dyn(3);
1157    /// let o2 = DynIndex::new_dyn(3);
1158    ///
1159    /// let d = TensorDynLen::delta(&[i1, i2], &[o1, o2]).unwrap();
1160    /// assert_eq!(d.dims(), vec![2, 2, 3, 3]);
1161    /// ```
1162    fn delta(
1163        input_indices: &[<Self as TensorIndex>::Index],
1164        output_indices: &[<Self as TensorIndex>::Index],
1165    ) -> Result<Self> {
1166        // Validate same number of input and output indices
1167        if input_indices.len() != output_indices.len() {
1168            return Err(anyhow::anyhow!(
1169                "Number of input indices ({}) must match output indices ({})",
1170                input_indices.len(),
1171                output_indices.len()
1172            ));
1173        }
1174
1175        if input_indices.is_empty() {
1176            // Return a scalar tensor with value 1.0
1177            return Self::scalar_one();
1178        }
1179
1180        // Build as outer product of diagonal tensors
1181        let mut result = Self::diagonal(&input_indices[0], &output_indices[0])?;
1182        for (inp, out) in input_indices[1..].iter().zip(output_indices[1..].iter()) {
1183            let diag = Self::diagonal(inp, out)?;
1184            result = result.outer_product(&diag)?;
1185        }
1186        Ok(result)
1187    }
1188
    /// Create a scalar tensor with value 1.0.
    ///
    /// This is used as the identity element for outer products; it is also the
    /// value returned by [`Self::delta`] when both index lists are empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use tensor4all_core::{TensorDynLen, TensorLike};
    ///
    /// let one = TensorDynLen::scalar_one().unwrap();
    /// assert_eq!(one.dims(), Vec::<usize>::new());
    /// assert!((one.only().real() - 1.0).abs() < 1e-12);
    /// ```
    fn scalar_one() -> Result<Self>;
1203
    /// Create a tensor filled with 1.0 for the given indices.
    ///
    /// This is useful for adding indices to tensors via outer product
    /// without changing tensor values (since multiplying by 1 is identity).
    ///
    /// # Arguments
    ///
    /// * `indices` - Indices of the resulting tensor; the tensor's dimension
    ///   along each axis matches the corresponding index.
    ///
    /// # Example
    /// To add a dummy index `l` to tensor `T`:
    /// ```
    /// use tensor4all_core::{DynIndex, TensorDynLen, TensorLike};
    ///
    /// # fn main() -> anyhow::Result<()> {
    /// let i = DynIndex::new_dyn(2);
    /// let l = DynIndex::new_dyn(3);
    /// let t = TensorDynLen::from_dense(vec![i.clone()], vec![2.0, 4.0])?;
    ///
    /// let ones = TensorDynLen::ones(&[l.clone()])?;
    /// let t_with_l = t.outer_product(&ones)?;
    ///
    /// assert_eq!(t_with_l.dims(), vec![2, 3]);
    /// assert_eq!(t_with_l.to_vec::<f64>()?, vec![2.0, 4.0, 2.0, 4.0, 2.0, 4.0]);
    /// # Ok(())
    /// # }
    /// ```
    fn ones(indices: &[<Self as TensorIndex>::Index]) -> Result<Self>;
1228
    /// Create a one-hot tensor with value 1.0 at the specified index positions.
    ///
    /// Similar to ITensors.jl's `onehot(i => 1, j => 2)`.
    ///
    /// # Arguments
    /// * `index_vals` - Pairs of (Index, 0-indexed position)
    ///
    /// # Returns
    /// A tensor over the given indices that is 1.0 at the specified position
    /// and 0.0 everywhere else.
    ///
    /// # Errors
    /// Returns error if any value >= corresponding index dimension.
    ///
    /// # Examples
    ///
    /// ```
    /// use tensor4all_core::{DynIndex, TensorDynLen, TensorLike};
    ///
    /// let i = DynIndex::new_dyn(3);
    /// let j = DynIndex::new_dyn(2);
    ///
    /// // One-hot at i=1, j=0
    /// let t = TensorDynLen::onehot(&[(i, 1), (j, 0)]).unwrap();
    /// assert_eq!(t.dims(), vec![3, 2]);
    ///
    /// let data = t.to_vec::<f64>().unwrap();
    /// // column-major 3x2: element at (1,0) = index 1
    /// assert!((data[1] - 1.0).abs() < 1e-12);
    /// assert!((t.sum().real() - 1.0).abs() < 1e-12);  // exactly one non-zero
    /// ```
    fn onehot(index_vals: &[(<Self as TensorIndex>::Index, usize)]) -> Result<Self>;
1257}
1258
/// Result of direct sum operation.
///
/// Contains the resulting tensor and the new indices created for the summed
/// dimensions (one new index per pair in the input).
///
/// Produced by the `direct_sum` operation on [`TensorLike`] tensors.
///
/// # Examples
///
/// ```
/// use tensor4all_core::{DynIndex, TensorDynLen, TensorLike, IndexLike};
///
/// let i = DynIndex::new_dyn(2);
/// let j = DynIndex::new_dyn(3);
///
/// let a = TensorDynLen::from_dense(vec![i.clone()], vec![1.0, 2.0]).unwrap();
/// let b = TensorDynLen::from_dense(vec![j.clone()], vec![3.0, 4.0, 5.0]).unwrap();
///
/// let result = a.direct_sum(&b, &[(i.clone(), j.clone())]).unwrap();
///
/// // New index has dimension = 2 + 3 = 5
/// assert_eq!(result.new_indices.len(), 1);
/// assert_eq!(result.new_indices[0].dim(), 5);
/// assert_eq!(result.tensor.to_vec::<f64>().unwrap(), vec![1.0, 2.0, 3.0, 4.0, 5.0]);
/// ```
#[derive(Debug, Clone)]
pub struct DirectSumResult<T: TensorLike> {
    /// The resulting tensor from direct sum.
    pub tensor: T,
    /// New indices created for the summed dimensions (one per pair).
    pub new_indices: Vec<T::Index>,
}
1289
1290#[cfg(test)]
1291mod tests;