//! tensor4all_core/defaults/tensordynlen.rs

1use crate::defaults::DynIndex;
2use crate::index_like::IndexLike;
3use crate::index_ops::{common_ind_positions, prepare_contraction, prepare_contraction_pairs};
4use crate::storage::{AnyScalar, Storage};
5use anyhow::Result;
6use num_complex::Complex64;
7use num_traits::Zero;
8use rand::Rng;
9use rand_distr::{Distribution, StandardNormal};
10use std::collections::HashSet;
11use std::ops::{Mul, Neg, Sub};
12use std::sync::Arc;
13use tenferro::{ScalarType as NativeScalarType, Tensor as NativeTensor};
14use tensor4all_tensorbackend::{
15    axpby_native_tensor, conj_native_tensor, contract_native_tensor,
16    dense_native_tensor_from_col_major, diag_native_tensor_from_col_major,
17    native_tensor_primal_to_dense_col_major, native_tensor_primal_to_storage,
18    outer_product_native_tensor, permute_native_tensor, reshape_col_major_native_tensor,
19    scale_native_tensor, storage_to_native_tensor, sum_native_tensor, TensorElement,
20};
21
/// Trait for scalar types that can generate random values from a standard
/// normal distribution.
///
/// This enables the generic [`TensorDynLen::random`] constructor.
/// Implemented below for `f64` (one draw per element) and `Complex64`
/// (independent draws for the real and imaginary parts).
pub trait RandomScalar: TensorElement {
    /// Generate a random value from the standard normal distribution.
    fn random_value<R: Rng>(rng: &mut R) -> Self;
}
30
31impl RandomScalar for f64 {
32    fn random_value<R: Rng>(rng: &mut R) -> Self {
33        StandardNormal.sample(rng)
34    }
35}
36
37impl RandomScalar for Complex64 {
38    fn random_value<R: Rng>(rng: &mut R) -> Self {
39        Complex64::new(StandardNormal.sample(rng), StandardNormal.sample(rng))
40    }
41}
42
43/// Compute the permutation array from original indices to new indices.
44///
45/// This function finds the mapping from new indices to original indices by
46/// matching index IDs. The result is a permutation array `perm` such that
47/// `new_indices[i]` corresponds to `original_indices[perm[i]]`.
48///
49/// # Arguments
50/// * `original_indices` - The original indices in their current order
51/// * `new_indices` - The desired new indices order (must be a permutation of original_indices)
52///
53/// # Returns
54/// A `Vec<usize>` representing the permutation: `perm[i]` is the position in
55/// `original_indices` of the index that should be at position `i` in `new_indices`.
56///
57/// # Panics
58/// Panics if any index ID in `new_indices` doesn't match an index in `original_indices`,
59/// or if there are duplicate indices in `new_indices`.
60///
61/// # Example
62/// ```
63/// use tensor4all_core::tensor::compute_permutation_from_indices;
64/// use tensor4all_core::DynIndex;
65///
66/// let i = DynIndex::new_dyn(2);
67/// let j = DynIndex::new_dyn(3);
68/// let original = vec![i.clone(), j.clone()];
69/// let new_order = vec![j.clone(), i.clone()];
70///
71/// let perm = compute_permutation_from_indices(&original, &new_order);
72/// assert_eq!(perm, vec![1, 0]);  // j is at position 1, i is at position 0
73/// ```
74pub fn compute_permutation_from_indices(
75    original_indices: &[DynIndex],
76    new_indices: &[DynIndex],
77) -> Vec<usize> {
78    assert_eq!(
79        new_indices.len(),
80        original_indices.len(),
81        "new_indices length must match original_indices length"
82    );
83
84    let mut perm = Vec::with_capacity(new_indices.len());
85    let mut used = std::collections::HashSet::new();
86
87    for new_idx in new_indices {
88        // Find the position of this index in the original indices
89        // DynIndex implements Eq, so we can compare directly
90        let pos = original_indices
91            .iter()
92            .position(|old_idx| old_idx == new_idx)
93            .expect("new_indices must be a permutation of original_indices");
94
95        if used.contains(&pos) {
96            panic!("duplicate index in new_indices");
97        }
98        used.insert(pos);
99        perm.push(pos);
100    }
101
102    perm
103}
104
/// Trait for accessing tensor index metadata.
///
/// Implemented by [`TensorDynLen`] so generic code can read index metadata
/// without depending on the concrete tensor type.
pub trait TensorAccess {
    /// Get a reference to the indices.
    fn indices(&self) -> &[DynIndex];
}
110
/// Tensor with dynamic rank (number of indices) and dynamic scalar type.
///
/// This is a concrete type using `DynIndex` (= `Index<DynId, TagSet>`).
///
/// The canonical numeric payload is always [`tenferro::Tensor`].
///
/// # Examples
///
/// ```
/// use tensor4all_core::{TensorDynLen, DynIndex};
///
/// // Create a 2×3 real tensor
/// let i = DynIndex::new_dyn(2);
/// let j = DynIndex::new_dyn(3);
/// let data = vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0];
/// let t = TensorDynLen::from_dense(vec![i.clone(), j.clone()], data).unwrap();
///
/// assert_eq!(t.dims(), vec![2, 3]);
///
/// // Sum all elements: 1+2+3+4+5+6 = 21
/// let s = t.sum();
/// assert!((s.real() - 21.0).abs() < 1e-12);
/// ```
#[derive(Clone)]
pub struct TensorDynLen {
    /// Full index information (includes tags and other metadata).
    /// This is the single source of truth for the tensor's dims.
    pub indices: Vec<DynIndex>,
    /// Canonical native payload preserving AD metadata.
    /// `Arc` makes `Clone` cheap: clones share the same payload.
    native: Arc<NativeTensor>,
}
141
142impl TensorAccess for TensorDynLen {
143    fn indices(&self) -> &[DynIndex] {
144        &self.indices
145    }
146}
147
148impl TensorDynLen {
149    fn validate_indices(indices: &[DynIndex]) {
150        let mut seen = HashSet::new();
151        for idx in indices {
152            assert!(
153                seen.insert(idx.clone()),
154                "Tensor indices must all be unique (no duplicate IDs)"
155            );
156        }
157    }
158
159    fn validate_diag_dims(dims: &[usize]) -> Result<()> {
160        if !dims.is_empty() {
161            let first_dim = dims[0];
162            for (i, &dim) in dims.iter().enumerate() {
163                anyhow::ensure!(
164                    dim == first_dim,
165                    "DiagTensor requires all indices to have the same dimension, but dims[{i}] = {dim} != dims[0] = {first_dim}"
166                );
167            }
168        }
169        Ok(())
170    }
171
    /// Convert `storage` into a canonical native payload with shape `dims`.
    ///
    /// Thin wrapper over [`storage_to_native_tensor`], kept as a single seam
    /// for payload construction.
    fn seed_native_payload(storage: &Storage, dims: &[usize]) -> Result<NativeTensor> {
        storage_to_native_tensor(storage, dims)
    }
175
176    /// Compute dims from `indices` order.
177    #[inline]
178    fn expected_dims_from_indices(indices: &[DynIndex]) -> Vec<usize> {
179        indices.iter().map(|idx| idx.dim()).collect()
180    }
181
    /// Get dims in the current `indices` order.
    ///
    /// This is computed on-demand from `indices` (single source of truth),
    /// so it always reflects the current index order.
    pub fn dims(&self) -> Vec<usize> {
        Self::expected_dims_from_indices(&self.indices)
    }
188
189    /// Create a new tensor with dynamic rank.
190    ///
191    /// # Panics
192    /// Panics if the storage is Diag and not all indices have the same dimension.
193    /// Panics if there are duplicate indices.
194    pub fn new(indices: Vec<DynIndex>, storage: Arc<Storage>) -> Self {
195        match Self::from_storage(indices, storage) {
196            Ok(tensor) => tensor,
197            Err(err) => panic!("TensorDynLen::new failed: {err}"),
198        }
199    }
200
    /// Create a new tensor with dynamic rank, automatically computing dimensions from indices.
    ///
    /// This is a convenience constructor that extracts dimensions from indices using `IndexLike::dim()`.
    /// It is a straight alias of [`Self::new`], kept for API symmetry.
    ///
    /// # Panics
    /// Panics if the storage is Diag and not all indices have the same dimension.
    /// Panics if there are duplicate indices.
    pub fn from_indices(indices: Vec<DynIndex>, storage: Arc<Storage>) -> Self {
        Self::new(indices, storage)
    }
211
212    /// Create a tensor from explicit storage by seeding a canonical native payload.
213    pub fn from_storage(indices: Vec<DynIndex>, storage: Arc<Storage>) -> Result<Self> {
214        let dims = Self::expected_dims_from_indices(&indices);
215        Self::validate_indices(&indices);
216        if storage.is_diag() {
217            Self::validate_diag_dims(&dims)?;
218        }
219        let native = Self::seed_native_payload(storage.as_ref(), &dims)?;
220        Self::from_native(indices, native)
221    }
222
223    /// Create a tensor from a native tenferro payload.
224    pub(crate) fn from_native(indices: Vec<DynIndex>, native: NativeTensor) -> Result<Self> {
225        let dims = Self::expected_dims_from_indices(&indices);
226        Self::validate_indices(&indices);
227        if dims != native.dims() {
228            return Err(anyhow::anyhow!(
229                "native payload dims {:?} do not match indices dims {:?}",
230                native.dims(),
231                dims
232            ));
233        }
234        if native.is_diag() {
235            Self::validate_diag_dims(&dims)?;
236        }
237        Ok(Self {
238            indices,
239            native: Arc::new(native),
240        })
241    }
242
243    /// Borrow the indices.
244    pub fn indices(&self) -> &[DynIndex] {
245        &self.indices
246    }
247
248    /// Borrow the native payload.
249    pub(crate) fn as_native(&self) -> &NativeTensor {
250        self.native.as_ref()
251    }
252
    /// Returns whether the tensor participates in reverse-mode AD.
    ///
    /// Delegates to the native payload's flag.
    pub fn requires_grad(&self) -> bool {
        self.native.requires_grad()
    }
257
258    /// Enables or disables reverse-mode gradient tracking.
259    pub fn set_requires_grad(&mut self, enabled: bool) -> Result<()> {
260        self.native = Arc::new(self.native.as_ref().detach().with_requires_grad(enabled));
261        Ok(())
262    }
263
264    /// Returns the accumulated reverse gradient when available.
265    pub fn grad(&self) -> Result<Option<Self>> {
266        self.native
267            .grad()
268            .map_err(|e| anyhow::anyhow!("TensorDynLen::grad failed: {e}"))?
269            .map(|native| {
270                Self::from_native(self.indices.clone(), native).map_err(|e| {
271                    anyhow::anyhow!(
272                        "TensorDynLen::grad returned a tensor incompatible with indices: {e}"
273                    )
274                })
275            })
276            .transpose()
277    }
278
    /// Clears accumulated reverse gradients on reverse leaves.
    ///
    /// Delegates to the native payload; errors are wrapped with context.
    pub fn zero_grad(&self) -> Result<()> {
        self.native
            .zero_grad()
            .map_err(|e| anyhow::anyhow!("TensorDynLen::zero_grad failed: {e}"))
    }
285
286    /// Runs reverse-mode AD backward pass from this tensor.
287    ///
288    /// Accumulates gradients on all input tensors that have `requires_grad == true`.
289    ///
290    /// # Arguments
291    /// * `grad_output` - Optional gradient seed. Pass `None` for default (ones).
292    pub fn backward(&self, grad_output: Option<&Self>) -> Result<()> {
293        match grad_output {
294            Some(grad_output) => self
295                .native
296                .backward_with_seed(&grad_output.native)
297                .map_err(|e| anyhow::anyhow!("TensorDynLen::backward_with_seed failed: {e}")),
298            None => self
299                .native
300                .backward()
301                .map_err(|e| anyhow::anyhow!("TensorDynLen::backward failed: {e}")),
302        }
303    }
304
    /// Check if this tensor is already in canonical form.
    ///
    /// `TensorDynLen` always stores its payload canonically, so this is
    /// unconditionally `true`.
    pub fn is_simple(&self) -> bool {
        true
    }
309
310    /// Materialize the primal snapshot as storage.
311    pub fn to_storage(&self) -> Result<Arc<Storage>> {
312        Ok(Arc::new(native_tensor_primal_to_storage(
313            self.native.as_ref(),
314        )?))
315    }
316
    /// Materialize the primal snapshot as storage.
    ///
    /// # Panics
    /// Panics if the snapshot cannot be materialized; use
    /// [`Self::to_storage`] for the fallible variant.
    pub fn storage(&self) -> Arc<Storage> {
        self.to_storage()
            .expect("TensorDynLen::storage snapshot materialization failed")
    }
322
    /// Sum all elements, returning `AnyScalar`.
    ///
    /// # Panics
    /// Panics if the native reduction fails.
    pub fn sum(&self) -> AnyScalar {
        sum_native_tensor(&self.native).expect("native sum failed")
    }
327
328    /// Extract the scalar value from a 0-dimensional tensor (or 1-element tensor).
329    ///
330    /// This is similar to Julia's `only()` function.
331    ///
332    /// # Panics
333    ///
334    /// Panics if the tensor has more than one element.
335    ///
336    /// # Example
337    ///
338    /// ```
339    /// use tensor4all_core::{TensorDynLen, AnyScalar};
340    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
341    ///
342    /// // Create a scalar tensor (0 dimensions, 1 element)
343    /// let indices: Vec<Index<DynId>> = vec![];
344    /// let tensor: TensorDynLen = TensorDynLen::from_dense(indices, vec![42.0]).unwrap();
345    ///
346    /// assert_eq!(tensor.only().real(), 42.0);
347    /// ```
348    pub fn only(&self) -> AnyScalar {
349        let dims = self.dims();
350        let total_size: usize = dims.iter().product();
351        assert!(
352            total_size == 1 || dims.is_empty(),
353            "only() requires a scalar tensor (1 element), got {} elements with dims {:?}",
354            if dims.is_empty() { 1 } else { total_size },
355            dims
356        );
357        self.sum()
358    }
359
360    /// Permute the tensor dimensions using the given new indices order.
361    ///
362    /// This is the main permutation method that takes the desired new indices
363    /// and automatically computes the corresponding permutation of dimensions
364    /// and data. The new indices must be a permutation of the original indices
365    /// (matched by ID).
366    ///
367    /// # Arguments
368    /// * `new_indices` - The desired new indices order. Must be a permutation
369    ///   of `self.indices` (matched by ID).
370    ///
371    /// # Panics
372    /// Panics if `new_indices.len() != self.indices.len()`, if any index ID
373    /// doesn't match, or if there are duplicate indices.
374    ///
375    /// # Example
376    /// ```
377    /// use tensor4all_core::TensorDynLen;
378    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
379    ///
380    /// // Create a 2×3 tensor
381    /// let i = Index::new_dyn(2);
382    /// let j = Index::new_dyn(3);
383    /// let indices = vec![i.clone(), j.clone()];
384    /// let tensor: TensorDynLen = TensorDynLen::from_dense(indices, vec![0.0; 6]).unwrap();
385    ///
386    /// // Permute to 3×2: swap the two dimensions by providing new indices order
387    /// let permuted = tensor.permute_indices(&[j, i]);
388    /// assert_eq!(permuted.dims(), vec![3, 2]);
389    /// ```
390    pub fn permute_indices(&self, new_indices: &[DynIndex]) -> Self {
391        // Compute permutation by matching IDs
392        let perm = compute_permutation_from_indices(&self.indices, new_indices);
393
394        let permuted_native =
395            permute_native_tensor(&self.native, &perm).expect("native permute_indices failed");
396        Self::from_native(new_indices.to_vec(), permuted_native)
397            .expect("native permute_indices snapshot failed")
398    }
399
400    /// Permute the tensor dimensions, returning a new tensor.
401    ///
402    /// This method reorders the indices, dimensions, and data according to the
403    /// given permutation. The permutation specifies which old axis each new
404    /// axis corresponds to: `new_axis[i] = old_axis[perm[i]]`.
405    ///
406    /// # Arguments
407    /// * `perm` - The permutation: `perm[i]` is the old axis index for new axis `i`
408    ///
409    /// # Panics
410    /// Panics if `perm.len() != self.indices.len()` or if the permutation is invalid.
411    ///
412    /// # Example
413    /// ```
414    /// use tensor4all_core::TensorDynLen;
415    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
416    ///
417    /// // Create a 2×3 tensor
418    /// let indices = vec![
419    ///     Index::new_dyn(2),
420    ///     Index::new_dyn(3),
421    /// ];
422    /// let tensor: TensorDynLen = TensorDynLen::from_dense(indices, vec![0.0; 6]).unwrap();
423    ///
424    /// // Permute to 3×2: swap the two dimensions
425    /// let permuted = tensor.permute(&[1, 0]);
426    /// assert_eq!(permuted.dims(), vec![3, 2]);
427    /// ```
428    pub fn permute(&self, perm: &[usize]) -> Self {
429        assert_eq!(
430            perm.len(),
431            self.indices.len(),
432            "permutation length must match tensor rank"
433        );
434
435        // Permute indices
436        let new_indices: Vec<DynIndex> = perm.iter().map(|&i| self.indices[i].clone()).collect();
437        let permuted_native =
438            permute_native_tensor(&self.native, perm).expect("native permute failed");
439        Self::from_native(new_indices, permuted_native).expect("native permute snapshot failed")
440    }
441
442    /// Contract this tensor with another tensor along common indices.
443    ///
444    /// This method finds common indices between `self` and `other`, then contracts
445    /// along those indices. The result tensor contains all non-contracted indices
446    /// from both tensors, with indices from `self` appearing first, followed by
447    /// indices from `other` that are not common.
448    ///
449    /// # Arguments
450    /// * `other` - The tensor to contract with
451    ///
452    /// # Returns
453    /// A new tensor resulting from the contraction.
454    ///
455    /// # Panics
456    /// Panics if there are no common indices, if common indices have mismatched
457    /// dimensions, or if storage types don't match.
458    ///
459    /// # Example
460    /// ```
461    /// use tensor4all_core::TensorDynLen;
462    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
463    ///
464    /// // Create two tensors: A[i, j] and B[j, k]
465    /// let i = Index::new_dyn(2);
466    /// let j = Index::new_dyn(3);
467    /// let k = Index::new_dyn(4);
468    ///
469    /// let indices_a = vec![i.clone(), j.clone()];
470    /// let tensor_a: TensorDynLen = TensorDynLen::from_dense(indices_a, vec![0.0; 6]).unwrap();
471    ///
472    /// let indices_b = vec![j.clone(), k.clone()];
473    /// let tensor_b: TensorDynLen = TensorDynLen::from_dense(indices_b, vec![0.0; 12]).unwrap();
474    ///
475    /// // Contract along j: result is C[i, k]
476    /// let result = tensor_a.contract(&tensor_b);
477    /// assert_eq!(result.dims(), vec![2, 4]);
478    /// ```
479    pub fn contract(&self, other: &Self) -> Self {
480        let self_dims = Self::expected_dims_from_indices(&self.indices);
481        let other_dims = Self::expected_dims_from_indices(&other.indices);
482        let spec = prepare_contraction(&self.indices, &self_dims, &other.indices, &other_dims)
483            .expect("contraction preparation failed");
484
485        let result_native =
486            contract_native_tensor(&self.native, &spec.axes_a, &other.native, &spec.axes_b)
487                .expect("native contract failed");
488        Self::from_native(spec.result_indices, result_native)
489            .expect("native contract snapshot failed")
490    }
491
    /// Contract this tensor with another tensor along explicitly specified index pairs.
    ///
    /// Similar to NumPy's `tensordot`, this method contracts only along the explicitly
    /// specified pairs of indices. Unlike `contract()` which automatically contracts
    /// all common indices, `tensordot` gives you explicit control over which indices
    /// to contract.
    ///
    /// # Arguments
    /// * `other` - The tensor to contract with
    /// * `pairs` - Pairs of indices to contract: `(index_from_self, index_from_other)`
    ///
    /// # Returns
    /// A new tensor resulting from the contraction, or an error if:
    /// - Any specified index is not found in the respective tensor
    /// - Dimensions don't match for any pair
    /// - The same axis is specified multiple times in `self` or `other`
    /// - There are common indices (same ID) that are not in the contraction pairs
    ///   (batch contraction is not yet implemented)
    ///
    /// # Future: Batch Contraction
    /// In a future version, common indices not specified in `pairs` will be treated
    /// as batch dimensions (like batched GEMM). Currently, this case returns an error.
    ///
    /// # Example
    /// ```
    /// use tensor4all_core::TensorDynLen;
    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
    ///
    /// // Create two tensors: A[i, j] and B[k, l] where j and k have same dimension but different IDs
    /// let i = Index::new_dyn(2);
    /// let j = Index::new_dyn(3);
    /// let k = Index::new_dyn(3);  // Same dimension as j, but different ID
    /// let l = Index::new_dyn(4);
    ///
    /// let indices_a = vec![i.clone(), j.clone()];
    /// let tensor_a: TensorDynLen = TensorDynLen::from_dense(indices_a, vec![0.0; 6]).unwrap();
    ///
    /// let indices_b = vec![k.clone(), l.clone()];
    /// let tensor_b: TensorDynLen = TensorDynLen::from_dense(indices_b, vec![0.0; 12]).unwrap();
    ///
    /// // Contract j (from A) with k (from B): result is C[i, l]
    /// let result = tensor_a.tensordot(&tensor_b, &[(j.clone(), k.clone())]).unwrap();
    /// assert_eq!(result.dims(), vec![2, 4]);
    /// ```
    pub fn tensordot(&self, other: &Self, pairs: &[(DynIndex, DynIndex)]) -> Result<Self> {
        use crate::index_ops::ContractionError;

        let self_dims = Self::expected_dims_from_indices(&self.indices);
        let other_dims = Self::expected_dims_from_indices(&other.indices);
        // Translate each structured ContractionError into a human-readable
        // anyhow error; the spec carries the contraction axes on success.
        let spec = prepare_contraction_pairs(
            &self.indices,
            &self_dims,
            &other.indices,
            &other_dims,
            pairs,
        )
        .map_err(|e| match e {
            ContractionError::NoCommonIndices => {
                anyhow::anyhow!("tensordot: No pairs specified for contraction")
            }
            ContractionError::BatchContractionNotImplemented => anyhow::anyhow!(
                "tensordot: Common index found but not in contraction pairs. \
                         Batch contraction is not yet implemented."
            ),
            ContractionError::IndexNotFound { tensor } => {
                anyhow::anyhow!("tensordot: Index not found in {} tensor", tensor)
            }
            ContractionError::DimensionMismatch {
                pos_a,
                pos_b,
                dim_a,
                dim_b,
            } => anyhow::anyhow!(
                "tensordot: Dimension mismatch: self[{}]={} != other[{}]={}",
                pos_a,
                dim_a,
                pos_b,
                dim_b
            ),
            ContractionError::DuplicateAxis { tensor, pos } => {
                anyhow::anyhow!("tensordot: Duplicate axis {} in {} tensor", pos, tensor)
            }
        })?;

        let result_native =
            contract_native_tensor(&self.native, &spec.axes_a, &other.native, &spec.axes_b)?;
        Self::from_native(spec.result_indices, result_native)
    }
580
581    /// Compute the outer product (tensor product) of two tensors.
582    ///
583    /// Creates a new tensor whose indices are the concatenation of the indices
584    /// from both input tensors. The result has shape `[...self.dims, ...other.dims]`.
585    ///
586    /// This is equivalent to numpy's `np.outer` or `np.tensordot(a, b, axes=0)`,
587    /// or ITensor's `*` operator when there are no common indices.
588    ///
589    /// # Arguments
590    /// * `other` - The other tensor to compute outer product with
591    ///
592    /// # Returns
593    /// A new tensor with indices from both tensors.
594    ///
595    /// # Example
596    /// ```
597    /// use tensor4all_core::TensorDynLen;
598    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
599    ///
600    /// let i = Index::new_dyn(2);
601    /// let j = Index::new_dyn(3);
602    /// let tensor_a: TensorDynLen = TensorDynLen::from_dense(vec![i.clone()], vec![1.0, 2.0]).unwrap();
603    /// let tensor_b: TensorDynLen =
604    ///     TensorDynLen::from_dense(vec![j.clone()], vec![1.0, 2.0, 3.0]).unwrap();
605    ///
606    /// // Outer product: C[i, j] = A[i] * B[j]
607    /// let result = tensor_a.outer_product(&tensor_b).unwrap();
608    /// assert_eq!(result.dims(), vec![2, 3]);
609    /// ```
610    pub fn outer_product(&self, other: &Self) -> Result<Self> {
611        use anyhow::Context;
612
613        // Check for common indices - outer product should have none
614        let common_positions = common_ind_positions(&self.indices, &other.indices);
615        if !common_positions.is_empty() {
616            let common_ids: Vec<_> = common_positions
617                .iter()
618                .map(|(pos_a, _)| self.indices[*pos_a].id())
619                .collect();
620            return Err(anyhow::anyhow!(
621                "outer_product: tensors have common indices {:?}. \
622                 Use tensordot to contract common indices, or use sim() to replace \
623                 indices with fresh IDs before computing outer product.",
624                common_ids
625            ))
626            .context("outer_product: common indices found");
627        }
628
629        // Build result indices and dimensions
630        let mut result_indices = self.indices.clone();
631        result_indices.extend(other.indices.iter().cloned());
632        let result_native = outer_product_native_tensor(&self.native, &other.native)
633            .expect("native outer product failed");
634        Self::from_native(result_indices, result_native)
635    }
636}
637
638// ============================================================================
639// Random tensor generation
640// ============================================================================
641
642impl TensorDynLen {
643    /// Create a random tensor with values from standard normal distribution (generic over scalar type).
644    ///
645    /// For `f64`, each element is drawn from the standard normal distribution.
646    /// For `Complex64`, both real and imaginary parts are drawn independently.
647    ///
648    /// # Type Parameters
649    /// * `T` - The scalar element type (must implement [`RandomScalar`])
650    /// * `R` - The random number generator type
651    ///
652    /// # Arguments
653    /// * `rng` - Random number generator
654    /// * `indices` - The indices for the tensor
655    ///
656    /// # Example
657    /// ```
658    /// use tensor4all_core::TensorDynLen;
659    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
660    /// use rand::SeedableRng;
661    /// use rand_chacha::ChaCha8Rng;
662    ///
663    /// let mut rng = ChaCha8Rng::seed_from_u64(42);
664    /// let i = Index::new_dyn(2);
665    /// let j = Index::new_dyn(3);
666    /// let tensor: TensorDynLen = TensorDynLen::random::<f64, _>(&mut rng, vec![i, j]);
667    /// assert_eq!(tensor.dims(), vec![2, 3]);
668    /// ```
669    pub fn random<T: RandomScalar, R: Rng>(rng: &mut R, indices: Vec<DynIndex>) -> Self {
670        let dims: Vec<usize> = indices.iter().map(|idx| idx.dim()).collect();
671        let size: usize = dims.iter().product();
672        let data: Vec<T> = (0..size).map(|_| T::random_value(rng)).collect();
673        Self::from_dense(indices, data).expect("TensorDynLen::random failed")
674    }
675}
676
677/// Implement multiplication operator for tensor contraction.
678///
679/// The `*` operator performs tensor contraction along common indices.
680/// This is equivalent to calling the `contract` method.
681///
682/// # Example
683/// ```
684/// use tensor4all_core::TensorDynLen;
685/// use tensor4all_core::index::{DefaultIndex as Index, DynId};
686///
687/// // Create two tensors: A[i, j] and B[j, k]
688/// let i = Index::new_dyn(2);
689/// let j = Index::new_dyn(3);
690/// let k = Index::new_dyn(4);
691///
692/// let indices_a = vec![i.clone(), j.clone()];
693/// let tensor_a: TensorDynLen = TensorDynLen::from_dense(indices_a, vec![0.0; 6]).unwrap();
694///
695/// let indices_b = vec![j.clone(), k.clone()];
696/// let tensor_b: TensorDynLen = TensorDynLen::from_dense(indices_b, vec![0.0; 12]).unwrap();
697///
698/// // Contract along j using * operator: result is C[i, k]
699/// let result = &tensor_a * &tensor_b;
700/// assert_eq!(result.dims(), vec![2, 4]);
701/// ```
702impl Mul<&TensorDynLen> for &TensorDynLen {
703    type Output = TensorDynLen;
704
705    fn mul(self, other: &TensorDynLen) -> Self::Output {
706        self.contract(other)
707    }
708}
709
710/// Implement multiplication operator for tensor contraction (owned version).
711///
712/// This allows using `tensor_a * tensor_b` when both tensors are owned.
713impl Mul<TensorDynLen> for TensorDynLen {
714    type Output = TensorDynLen;
715
716    fn mul(self, other: TensorDynLen) -> Self::Output {
717        self.contract(&other)
718    }
719}
720
721/// Implement multiplication operator for tensor contraction (mixed reference/owned).
722impl Mul<TensorDynLen> for &TensorDynLen {
723    type Output = TensorDynLen;
724
725    fn mul(self, other: TensorDynLen) -> Self::Output {
726        self.contract(&other)
727    }
728}
729
730/// Implement multiplication operator for tensor contraction (mixed owned/reference).
731impl Mul<&TensorDynLen> for TensorDynLen {
732    type Output = TensorDynLen;
733
734    fn mul(self, other: &TensorDynLen) -> Self::Output {
735        self.contract(other)
736    }
737}
738
739impl Sub<&TensorDynLen> for &TensorDynLen {
740    type Output = TensorDynLen;
741
742    fn sub(self, other: &TensorDynLen) -> Self::Output {
743        TensorDynLen::axpby(
744            self,
745            AnyScalar::new_real(1.0),
746            other,
747            AnyScalar::new_real(-1.0),
748        )
749        .expect("tensor subtraction failed")
750    }
751}
752
753impl Sub<TensorDynLen> for TensorDynLen {
754    type Output = TensorDynLen;
755
756    fn sub(self, other: TensorDynLen) -> Self::Output {
757        Sub::sub(&self, &other)
758    }
759}
760
761impl Sub<TensorDynLen> for &TensorDynLen {
762    type Output = TensorDynLen;
763
764    fn sub(self, other: TensorDynLen) -> Self::Output {
765        Sub::sub(self, &other)
766    }
767}
768
769impl Sub<&TensorDynLen> for TensorDynLen {
770    type Output = TensorDynLen;
771
772    fn sub(self, other: &TensorDynLen) -> Self::Output {
773        Sub::sub(&self, other)
774    }
775}
776
777impl Neg for &TensorDynLen {
778    type Output = TensorDynLen;
779
780    fn neg(self) -> Self::Output {
781        TensorDynLen::scale(self, AnyScalar::new_real(-1.0)).expect("tensor negation failed")
782    }
783}
784
785impl Neg for TensorDynLen {
786    type Output = TensorDynLen;
787
788    fn neg(self) -> Self::Output {
789        Neg::neg(&self)
790    }
791}
792
793impl TensorDynLen {
794    /// Add two tensors element-wise.
795    ///
796    /// The tensors must have the same index set (matched by ID). If the indices
797    /// are in a different order, the other tensor will be permuted to match `self`.
798    ///
799    /// # Arguments
800    /// * `other` - The tensor to add
801    ///
802    /// # Returns
803    /// A new tensor representing `self + other`, or an error if:
804    /// - The tensors have different index sets
805    /// - The dimensions don't match
806    /// - Storage types are incompatible
807    ///
808    /// # Example
809    /// ```
810    /// use tensor4all_core::TensorDynLen;
811    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
812    ///
813    /// let i = Index::new_dyn(2);
814    /// let j = Index::new_dyn(3);
815    ///
816    /// let indices_a = vec![i.clone(), j.clone()];
817    /// let data_a = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
818    /// let tensor_a: TensorDynLen = TensorDynLen::from_dense(indices_a, data_a).unwrap();
819    ///
820    /// let indices_b = vec![i.clone(), j.clone()];
821    /// let data_b = vec![1.0, 1.0, 1.0, 1.0, 1.0, 1.0];
822    /// let tensor_b: TensorDynLen = TensorDynLen::from_dense(indices_b, data_b).unwrap();
823    ///
824    /// let sum = tensor_a.add(&tensor_b).unwrap();
825    /// // sum = [[2, 3, 4], [5, 6, 7]]
826    /// ```
827    pub fn add(&self, other: &Self) -> Result<Self> {
828        // Validate that both tensors have the same number of indices
829        if self.indices.len() != other.indices.len() {
830            return Err(anyhow::anyhow!(
831                "Index count mismatch: self has {} indices, other has {}",
832                self.indices.len(),
833                other.indices.len()
834            ));
835        }
836
837        // Validate that both tensors have the same set of indices
838        let self_set: HashSet<_> = self.indices.iter().collect();
839        let other_set: HashSet<_> = other.indices.iter().collect();
840
841        if self_set != other_set {
842            return Err(anyhow::anyhow!(
843                "Index set mismatch: tensors must have the same indices"
844            ));
845        }
846
847        // Permute other to match self's index order (no-op if already aligned)
848        let other_aligned = other.permute_indices(&self.indices);
849
850        // Validate dimensions match after alignment
851        let self_expected_dims = Self::expected_dims_from_indices(&self.indices);
852        let other_expected_dims = Self::expected_dims_from_indices(&other_aligned.indices);
853        if self_expected_dims != other_expected_dims {
854            use crate::TagSetLike;
855            let fmt = |indices: &[DynIndex]| -> Vec<String> {
856                indices
857                    .iter()
858                    .map(|idx| {
859                        let tags: Vec<String> = idx.tags().iter().collect();
860                        format!("{:?}(dim={},tags={:?})", idx.id(), idx.dim(), tags)
861                    })
862                    .collect()
863            };
864            return Err(anyhow::anyhow!(
865                "Dimension mismatch after alignment.\n\
866                 self: dims={:?}, indices(order)={:?}\n\
867                 other_aligned: dims={:?}, indices(order)={:?}",
868                self_expected_dims,
869                fmt(&self.indices),
870                other_expected_dims,
871                fmt(&other_aligned.indices)
872            ));
873        }
874
875        self.axpby(
876            AnyScalar::new_real(1.0),
877            &other_aligned,
878            AnyScalar::new_real(1.0),
879        )
880    }
881
882    /// Compute a linear combination: `a * self + b * other`.
883    pub fn axpby(&self, a: AnyScalar, other: &Self, b: AnyScalar) -> Result<Self> {
884        // Validate that both tensors have the same number of indices.
885        if self.indices.len() != other.indices.len() {
886            return Err(anyhow::anyhow!(
887                "Index count mismatch: self has {} indices, other has {}",
888                self.indices.len(),
889                other.indices.len()
890            ));
891        }
892
893        // Validate that both tensors have the same set of indices.
894        let self_set: HashSet<_> = self.indices.iter().collect();
895        let other_set: HashSet<_> = other.indices.iter().collect();
896        if self_set != other_set {
897            return Err(anyhow::anyhow!(
898                "Index set mismatch: tensors must have the same indices"
899            ));
900        }
901
902        // Align other tensor axis order to self.
903        let other_aligned = other.permute_indices(&self.indices);
904
905        // Validate dimensions match after alignment.
906        let self_expected_dims = Self::expected_dims_from_indices(&self.indices);
907        let other_expected_dims = Self::expected_dims_from_indices(&other_aligned.indices);
908        if self_expected_dims != other_expected_dims {
909            return Err(anyhow::anyhow!(
910                "Dimension mismatch after alignment: self={:?}, other_aligned={:?}",
911                self_expected_dims,
912                other_expected_dims
913            ));
914        }
915
916        // Reuse storage-level fused axpby to avoid materializing two scaled temporaries.
917        let combined = axpby_native_tensor(&self.native, &a, &other_aligned.native, &b)?;
918        Self::from_native(self.indices.clone(), combined)
919    }
920
921    /// Scalar multiplication.
922    pub fn scale(&self, scalar: AnyScalar) -> Result<Self> {
923        let scaled = scale_native_tensor(&self.native, &scalar)?;
924        Self::from_native(self.indices.clone(), scaled)
925    }
926
927    /// Inner product (dot product) of two tensors.
928    ///
929    /// Computes `⟨self, other⟩ = Σ conj(self)_i * other_i`.
930    pub fn inner_product(&self, other: &Self) -> Result<AnyScalar> {
931        if self.indices.len() == other.indices.len() {
932            let self_set: HashSet<_> = self.indices.iter().collect();
933            let other_set: HashSet<_> = other.indices.iter().collect();
934            if self_set == other_set {
935                let other_aligned = other.permute_indices(&self.indices);
936                let conj_self = conj_native_tensor(&self.native)?;
937                let axes: Vec<usize> = (0..self.indices.len()).collect();
938                let result_native =
939                    contract_native_tensor(&conj_self, &axes, &other_aligned.native, &axes)?;
940                return sum_native_tensor(&result_native);
941            }
942        }
943
944        // Contract self.conj() with other over all indices
945        let conj_self = self.conj();
946        let result =
947            super::contract::contract_multi(&[&conj_self, other], crate::AllowedPairs::All)?;
948        // Result should be a scalar (no indices)
949        Ok(result.sum())
950    }
951}
952
953// ============================================================================
954// Index Replacement Methods
955// ============================================================================
956
957impl TensorDynLen {
958    /// Replace an index in the tensor with a new index.
959    ///
960    /// This replaces the index matching `old_index` by ID with `new_index`.
961    /// The storage data is not modified, only the index metadata is changed.
962    ///
963    /// # Arguments
964    /// * `old_index` - The index to replace (matched by ID)
965    /// * `new_index` - The new index to use
966    ///
967    /// # Returns
968    /// A new tensor with the index replaced. If no index matches `old_index`,
969    /// returns a clone of the original tensor.
970    ///
971    /// # Example
972    /// ```
973    /// use tensor4all_core::TensorDynLen;
974    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
975    ///
976    /// let i = Index::new_dyn(2);
977    /// let j = Index::new_dyn(3);
978    /// let new_i = Index::new_dyn(2);  // Same dimension, different ID
979    ///
980    /// let indices = vec![i.clone(), j.clone()];
981    /// let tensor: TensorDynLen = TensorDynLen::from_dense(indices, vec![0.0; 6]).unwrap();
982    ///
983    /// // Replace index i with new_i
984    /// let replaced = tensor.replaceind(&i, &new_i);
985    /// assert_eq!(replaced.indices[0].id, new_i.id);
986    /// assert_eq!(replaced.indices[1].id, j.id);
987    /// ```
988    pub fn replaceind(&self, old_index: &DynIndex, new_index: &DynIndex) -> Self {
989        // Validate dimension match
990        if old_index.dim() != new_index.dim() {
991            panic!(
992                "Index space mismatch: cannot replace index with dimension {} with index of dimension {}",
993                old_index.dim(),
994                new_index.dim()
995            );
996        }
997
998        let new_indices: Vec<_> = self
999            .indices
1000            .iter()
1001            .map(|idx| {
1002                if *idx == *old_index {
1003                    new_index.clone()
1004                } else {
1005                    idx.clone()
1006                }
1007            })
1008            .collect();
1009
1010        Self {
1011            indices: new_indices,
1012            native: self.native.clone(),
1013        }
1014    }
1015
1016    /// Replace multiple indices in the tensor.
1017    ///
1018    /// This replaces each index in `old_indices` (matched by ID) with the corresponding
1019    /// index in `new_indices`. The storage data is not modified.
1020    ///
1021    /// # Arguments
1022    /// * `old_indices` - The indices to replace (matched by ID)
1023    /// * `new_indices` - The new indices to use
1024    ///
1025    /// # Panics
1026    /// Panics if `old_indices` and `new_indices` have different lengths.
1027    ///
1028    /// # Returns
1029    /// A new tensor with the indices replaced. Indices not found in `old_indices`
1030    /// are kept unchanged.
1031    ///
1032    /// # Example
1033    /// ```
1034    /// use tensor4all_core::TensorDynLen;
1035    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
1036    ///
1037    /// let i = Index::new_dyn(2);
1038    /// let j = Index::new_dyn(3);
1039    /// let new_i = Index::new_dyn(2);
1040    /// let new_j = Index::new_dyn(3);
1041    ///
1042    /// let indices = vec![i.clone(), j.clone()];
1043    /// let tensor: TensorDynLen = TensorDynLen::from_dense(indices, vec![0.0; 6]).unwrap();
1044    ///
1045    /// // Replace both indices
1046    /// let replaced = tensor.replaceinds(&[i.clone(), j.clone()], &[new_i.clone(), new_j.clone()]);
1047    /// assert_eq!(replaced.indices[0].id, new_i.id);
1048    /// assert_eq!(replaced.indices[1].id, new_j.id);
1049    /// ```
1050    pub fn replaceinds(&self, old_indices: &[DynIndex], new_indices: &[DynIndex]) -> Self {
1051        assert_eq!(
1052            old_indices.len(),
1053            new_indices.len(),
1054            "old_indices and new_indices must have the same length"
1055        );
1056
1057        // Validate dimension matches for all replacements
1058        for (old, new) in old_indices.iter().zip(new_indices.iter()) {
1059            if old.dim() != new.dim() {
1060                panic!(
1061                    "Index space mismatch: cannot replace index with dimension {} with index of dimension {}",
1062                    old.dim(),
1063                    new.dim()
1064                );
1065            }
1066        }
1067
1068        // Build a map from old indices to new indices
1069        let replacement_map: std::collections::HashMap<_, _> =
1070            old_indices.iter().zip(new_indices.iter()).collect();
1071
1072        let new_indices_vec: Vec<_> = self
1073            .indices
1074            .iter()
1075            .map(|idx| {
1076                if let Some(new_idx) = replacement_map.get(idx) {
1077                    (*new_idx).clone()
1078                } else {
1079                    idx.clone()
1080                }
1081            })
1082            .collect();
1083
1084        Self {
1085            indices: new_indices_vec,
1086            native: self.native.clone(),
1087        }
1088    }
1089}
1090
1091// ============================================================================
1092// Complex Conjugation
1093// ============================================================================
1094
1095impl TensorDynLen {
1096    /// Complex conjugate of all tensor elements.
1097    ///
1098    /// For real (f64) tensors, returns a copy (conjugate of real is identity).
1099    /// For complex (Complex64) tensors, conjugates each element.
1100    ///
1101    /// The indices and dimensions remain unchanged.
1102    ///
1103    /// This is inspired by the `conj` operation in ITensorMPS.jl.
1104    ///
1105    /// # Example
1106    /// ```
1107    /// use tensor4all_core::TensorDynLen;
1108    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
1109    /// use num_complex::Complex64;
1110    ///
1111    /// let i = Index::new_dyn(2);
1112    /// let data = vec![Complex64::new(1.0, 2.0), Complex64::new(3.0, -4.0)];
1113    /// let tensor: TensorDynLen = TensorDynLen::from_dense(vec![i], data).unwrap();
1114    ///
1115    /// let conj_tensor = tensor.conj();
1116    /// // Elements are now conjugated: 1-2i, 3+4i
1117    /// ```
1118    pub fn conj(&self) -> Self {
1119        // Conjugate tensor: conjugate storage data and map indices via IndexLike::conj()
1120        // For default undirected indices, conj() is a no-op, so this is future-proof
1121        // for QSpace-compatible directed indices where conj() flips Ket <-> Bra
1122        let new_indices: Vec<DynIndex> = self.indices.iter().map(|idx| idx.conj()).collect();
1123        let conj_native = conj_native_tensor(&self.native).expect("native conjugation failed");
1124        Self::from_native(new_indices, conj_native).expect("native conjugation snapshot failed")
1125    }
1126}
1127
1128// ============================================================================
1129// Norm Computation
1130// ============================================================================
1131
1132impl TensorDynLen {
1133    /// Compute the squared Frobenius norm of the tensor: ||T||² = Σ|T_ijk...|²
1134    ///
1135    /// For real tensors: sum of squares of all elements.
1136    /// For complex tensors: sum of |z|² = z * conj(z) for all elements.
1137    ///
1138    /// # Example
1139    /// ```
1140    /// use tensor4all_core::TensorDynLen;
1141    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
1142    ///
1143    /// let i = Index::new_dyn(2);
1144    /// let j = Index::new_dyn(3);
1145    /// let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];  // 1² + 2² + ... + 6² = 91
1146    /// let tensor: TensorDynLen = TensorDynLen::from_dense(vec![i, j], data).unwrap();
1147    ///
1148    /// assert!((tensor.norm_squared() - 91.0).abs() < 1e-10);
1149    /// ```
1150    pub fn norm_squared(&self) -> f64 {
1151        // Special case: scalar tensor (no indices)
1152        if self.indices.is_empty() {
1153            // For a scalar, ||T||² = |value|²
1154            let value = self.sum();
1155            let abs_val = value.abs();
1156            return abs_val * abs_val;
1157        }
1158
1159        // Contract tensor with its conjugate over all indices → scalar
1160        // ||T||² = Σ T_ijk... * conj(T_ijk...) = Σ |T_ijk...|²
1161        let conj = self.conj();
1162        let scalar = self.contract(&conj);
1163        // The mathematical result is nonnegative and real. Clamp tiny negative
1164        // roundoff so downstream `sqrt` stays well-defined for complex tensors.
1165        scalar.sum().real().max(0.0)
1166    }
1167
1168    /// Compute the Frobenius norm of the tensor: ||T|| = sqrt(Σ|T_ijk...|²)
1169    ///
1170    /// # Example
1171    /// ```
1172    /// use tensor4all_core::TensorDynLen;
1173    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
1174    ///
1175    /// let i = Index::new_dyn(2);
1176    /// let data = vec![3.0, 4.0];  // sqrt(9 + 16) = 5
1177    /// let tensor: TensorDynLen = TensorDynLen::from_dense(vec![i], data).unwrap();
1178    ///
1179    /// assert!((tensor.norm() - 5.0).abs() < 1e-10);
1180    /// ```
1181    pub fn norm(&self) -> f64 {
1182        self.norm_squared().sqrt()
1183    }
1184
1185    /// Maximum absolute value of all elements (L-infinity norm).
1186    pub fn maxabs(&self) -> f64 {
1187        self.to_storage()
1188            .map(|storage| storage.max_abs())
1189            .unwrap_or(0.0)
1190    }
1191
1192    /// Compute the relative distance between two tensors.
1193    ///
1194    /// Returns `||A - B|| / ||A||` (Frobenius norm).
1195    /// If `||A|| = 0`, returns `||B||` instead to avoid division by zero.
1196    ///
1197    /// This is the ITensor-style distance function useful for comparing tensors.
1198    ///
1199    /// # Arguments
1200    /// * `other` - The other tensor to compare with
1201    ///
1202    /// # Returns
1203    /// The relative distance as a f64 value.
1204    ///
1205    /// # Note
1206    /// The indices of both tensors must be permutable to each other.
1207    /// The result tensor (A - B) uses the index ordering from self.
1208    ///
1209    /// # Example
1210    /// ```
1211    /// use tensor4all_core::TensorDynLen;
1212    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
1213    ///
1214    /// let i = Index::new_dyn(2);
1215    /// let data_a = vec![1.0, 0.0];
1216    /// let data_b = vec![1.0, 0.0];  // Same tensor
1217    /// let tensor_a: TensorDynLen = TensorDynLen::from_dense(vec![i.clone()], data_a).unwrap();
1218    /// let tensor_b: TensorDynLen = TensorDynLen::from_dense(vec![i.clone()], data_b).unwrap();
1219    ///
1220    /// assert!(tensor_a.distance(&tensor_b) < 1e-10);  // Zero distance
1221    /// ```
1222    pub fn distance(&self, other: &Self) -> f64 {
1223        let norm_self = self.norm();
1224
1225        // Compute A - B = A + (-1) * B
1226        let neg_other = other
1227            .scale(AnyScalar::new_real(-1.0))
1228            .expect("distance: tensor scaling failed");
1229        let diff = self
1230            .add(&neg_other)
1231            .expect("distance: tensors must have same indices");
1232        let norm_diff = diff.norm();
1233
1234        if norm_self > 0.0 {
1235            norm_diff / norm_self
1236        } else {
1237            norm_diff
1238        }
1239    }
1240}
1241
1242impl std::fmt::Debug for TensorDynLen {
1243    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1244        f.debug_struct("TensorDynLen")
1245            .field("indices", &self.indices)
1246            .field("dims", &self.dims())
1247            .field("is_diag", &self.native.is_diag())
1248            .finish()
1249    }
1250}
1251
1252/// Create a diagonal tensor with dynamic rank from diagonal data.
1253///
1254/// # Arguments
1255/// * `indices` - The indices for the tensor (all must have the same dimension)
1256/// * `diag_data` - The diagonal elements (length must equal the dimension of indices)
1257///
1258/// The public native bridge currently materializes diagonal payloads densely, so
1259/// the returned tensor is mathematically diagonal but may not report
1260/// [`TensorDynLen::is_diag`] at the native-storage level.
1261///
1262/// # Panics
1263/// Panics if indices have different dimensions, or if diag_data length doesn't match.
1264pub fn diag_tensor_dyn_len(indices: Vec<DynIndex>, diag_data: Vec<f64>) -> TensorDynLen {
1265    TensorDynLen::from_diag(indices, diag_data)
1266        .unwrap_or_else(|err| panic!("diag_tensor_dyn_len failed: {err}"))
1267}
1268
1269/// Unfold a tensor into a matrix by splitting indices into left and right groups.
1270///
1271/// This function validates the split, permutes the tensor so that left indices
1272/// come first, and returns a rank-2 native tenferro tensor along with metadata.
1273///
1274/// # Arguments
1275/// * `t` - Input tensor
1276/// * `left_inds` - Indices to place on the left (row) side of the matrix
1277///
1278/// # Returns
1279/// A tuple `(matrix_tensor, left_len, m, n, left_indices, right_indices)` where:
1280/// - `matrix_tensor` is a rank-2 `tenferro::Tensor` with shape `[m, n]`
1281/// - `left_len` is the number of left indices
1282/// - `m` is the product of left index dimensions
1283/// - `n` is the product of right index dimensions
1284/// - `left_indices` is the vector of left indices (cloned)
1285/// - `right_indices` is the vector of right indices (cloned)
1286///
1287/// # Errors
1288/// Returns an error if:
1289/// - The tensor rank is < 2
1290/// - `left_inds` is empty or contains all indices
1291/// - `left_inds` contains indices not in the tensor or duplicates
1292/// - Native reshape fails
1293#[allow(clippy::type_complexity)]
1294pub fn unfold_split(
1295    t: &TensorDynLen,
1296    left_inds: &[DynIndex],
1297) -> Result<(
1298    NativeTensor,
1299    usize,
1300    usize,
1301    usize,
1302    Vec<DynIndex>,
1303    Vec<DynIndex>,
1304)> {
1305    let rank = t.indices.len();
1306
1307    // Validate rank
1308    anyhow::ensure!(rank >= 2, "Tensor must have rank >= 2, got rank {}", rank);
1309
1310    let left_len = left_inds.len();
1311
1312    // Validate split: must be a proper subset
1313    anyhow::ensure!(
1314        left_len > 0 && left_len < rank,
1315        "Left indices must be a non-empty proper subset of tensor indices (0 < left_len < rank), got left_len={}, rank={}",
1316        left_len,
1317        rank
1318    );
1319
1320    // Validate that all left_inds are in the tensor and there are no duplicates
1321    let tensor_set: HashSet<_> = t.indices.iter().collect();
1322    let mut left_set = HashSet::new();
1323
1324    for left_idx in left_inds {
1325        anyhow::ensure!(
1326            tensor_set.contains(left_idx),
1327            "Index in left_inds not found in tensor"
1328        );
1329        anyhow::ensure!(left_set.insert(left_idx), "Duplicate index in left_inds");
1330    }
1331
1332    // Build right_inds: all indices not in left_inds, in original order
1333    let mut right_inds = Vec::new();
1334    for idx in &t.indices {
1335        if !left_set.contains(idx) {
1336            right_inds.push(idx.clone());
1337        }
1338    }
1339
1340    // Build new_indices: left_inds first, then right_inds
1341    let mut new_indices = Vec::with_capacity(rank);
1342    new_indices.extend_from_slice(left_inds);
1343    new_indices.extend_from_slice(&right_inds);
1344
1345    // Permute tensor to have left indices first, then right indices
1346    let unfolded = t.permute_indices(&new_indices);
1347
1348    // Compute matrix dimensions
1349    let unfolded_dims = unfolded.dims();
1350    let m: usize = unfolded_dims[..left_len].iter().product();
1351    let n: usize = unfolded_dims[left_len..].iter().product();
1352
1353    let matrix_tensor = reshape_col_major_native_tensor(unfolded.as_native(), &[m, n])?;
1354
1355    Ok((
1356        matrix_tensor,
1357        left_len,
1358        m,
1359        n,
1360        left_inds.to_vec(),
1361        right_inds,
1362    ))
1363}
1364
1365// ============================================================================
1366// TensorIndex implementation for TensorDynLen
1367// ============================================================================
1368
1369use crate::tensor_index::TensorIndex;
1370
1371impl TensorIndex for TensorDynLen {
1372    type Index = DynIndex;
1373
1374    fn external_indices(&self) -> Vec<DynIndex> {
1375        // For TensorDynLen, all indices are external.
1376        self.indices.clone()
1377    }
1378
1379    fn num_external_indices(&self) -> usize {
1380        self.indices.len()
1381    }
1382
1383    fn replaceind(&self, old_index: &DynIndex, new_index: &DynIndex) -> Result<Self> {
1384        // Delegate to the inherent method
1385        Ok(TensorDynLen::replaceind(self, old_index, new_index))
1386    }
1387
1388    fn replaceinds(&self, old_indices: &[DynIndex], new_indices: &[DynIndex]) -> Result<Self> {
1389        // Delegate to the inherent method
1390        Ok(TensorDynLen::replaceinds(self, old_indices, new_indices))
1391    }
1392}
1393
1394// ============================================================================
1395// TensorLike implementation for TensorDynLen
1396// ============================================================================
1397
1398use crate::tensor_like::{FactorizeError, FactorizeOptions, FactorizeResult, TensorLike};
1399
1400impl TensorLike for TensorDynLen {
1401    fn factorize(
1402        &self,
1403        left_inds: &[DynIndex],
1404        options: &FactorizeOptions,
1405    ) -> std::result::Result<FactorizeResult<Self>, FactorizeError> {
1406        crate::factorize::factorize(self, left_inds, options)
1407    }
1408
1409    fn conj(&self) -> Self {
1410        // Delegate to the inherent method (complex conjugate for dense tensors)
1411        TensorDynLen::conj(self)
1412    }
1413
1414    fn direct_sum(
1415        &self,
1416        other: &Self,
1417        pairs: &[(DynIndex, DynIndex)],
1418    ) -> Result<crate::tensor_like::DirectSumResult<Self>> {
1419        let (tensor, new_indices) = crate::direct_sum::direct_sum(self, other, pairs)?;
1420        Ok(crate::tensor_like::DirectSumResult {
1421            tensor,
1422            new_indices,
1423        })
1424    }
1425
1426    fn outer_product(&self, other: &Self) -> Result<Self> {
1427        // Delegate to the inherent method
1428        TensorDynLen::outer_product(self, other)
1429    }
1430
1431    fn norm_squared(&self) -> f64 {
1432        // Delegate to the inherent method
1433        TensorDynLen::norm_squared(self)
1434    }
1435
1436    fn maxabs(&self) -> f64 {
1437        TensorDynLen::maxabs(self)
1438    }
1439
1440    fn permuteinds(&self, new_order: &[DynIndex]) -> Result<Self> {
1441        // Delegate to the inherent method
1442        Ok(TensorDynLen::permute_indices(self, new_order))
1443    }
1444
1445    fn contract(tensors: &[&Self], allowed: crate::AllowedPairs<'_>) -> Result<Self> {
1446        // Delegate to contract_multi which handles disconnected components
1447        super::contract::contract_multi(tensors, allowed)
1448    }
1449
1450    fn contract_connected(tensors: &[&Self], allowed: crate::AllowedPairs<'_>) -> Result<Self> {
1451        // Delegate to contract_connected which requires connected graph
1452        super::contract::contract_connected(tensors, allowed)
1453    }
1454
1455    fn axpby(&self, a: crate::AnyScalar, other: &Self, b: crate::AnyScalar) -> Result<Self> {
1456        // Delegate to the inherent method
1457        TensorDynLen::axpby(self, a, other, b)
1458    }
1459
1460    fn scale(&self, scalar: crate::AnyScalar) -> Result<Self> {
1461        // Delegate to the inherent method
1462        TensorDynLen::scale(self, scalar)
1463    }
1464
1465    fn inner_product(&self, other: &Self) -> Result<crate::AnyScalar> {
1466        // Delegate to the inherent method
1467        TensorDynLen::inner_product(self, other)
1468    }
1469
1470    fn diagonal(input_index: &DynIndex, output_index: &DynIndex) -> Result<Self> {
1471        let dim = input_index.dim();
1472        if dim != output_index.dim() {
1473            return Err(anyhow::anyhow!(
1474                "Dimension mismatch: input index has dim {}, output has dim {}",
1475                dim,
1476                output_index.dim(),
1477            ));
1478        }
1479
1480        // Build identity matrix
1481        let mut data = vec![0.0_f64; dim * dim];
1482        for i in 0..dim {
1483            data[i * dim + i] = 1.0;
1484        }
1485
1486        TensorDynLen::from_dense(vec![input_index.clone(), output_index.clone()], data)
1487    }
1488
1489    fn scalar_one() -> Result<Self> {
1490        TensorDynLen::from_dense(vec![], vec![1.0_f64])
1491    }
1492
1493    fn ones(indices: &[DynIndex]) -> Result<Self> {
1494        if indices.is_empty() {
1495            return Self::scalar_one();
1496        }
1497        let dims: Vec<usize> = indices.iter().map(|idx| idx.size()).collect();
1498        let total_size = checked_total_size(&dims)?;
1499        TensorDynLen::from_dense(indices.to_vec(), vec![1.0_f64; total_size])
1500    }
1501
1502    fn onehot(index_vals: &[(DynIndex, usize)]) -> Result<Self> {
1503        if index_vals.is_empty() {
1504            return Self::scalar_one();
1505        }
1506        let indices: Vec<DynIndex> = index_vals.iter().map(|(idx, _)| idx.clone()).collect();
1507        let vals: Vec<usize> = index_vals.iter().map(|(_, v)| *v).collect();
1508        let dims: Vec<usize> = indices.iter().map(|idx| idx.size()).collect();
1509
1510        for (k, (&v, &d)) in vals.iter().zip(dims.iter()).enumerate() {
1511            if v >= d {
1512                return Err(anyhow::anyhow!(
1513                    "onehot: value {} at position {} is >= dimension {}",
1514                    v,
1515                    k,
1516                    d
1517                ));
1518            }
1519        }
1520
1521        let total_size = checked_total_size(&dims)?;
1522        let mut data = vec![0.0_f64; total_size];
1523
1524        let offset = column_major_offset(&dims, &vals)?;
1525        data[offset] = 1.0;
1526
1527        Self::from_dense(indices, data)
1528    }
1529
1530    // delta() uses the default implementation via diagonal() and outer_product()
1531}
1532
1533fn checked_total_size(dims: &[usize]) -> Result<usize> {
1534    dims.iter().try_fold(1_usize, |acc, &d| {
1535        if d == 0 {
1536            return Err(anyhow::anyhow!("invalid dimension 0"));
1537        }
1538        acc.checked_mul(d)
1539            .ok_or_else(|| anyhow::anyhow!("tensor size overflow"))
1540    })
1541}
1542
1543fn column_major_offset(dims: &[usize], vals: &[usize]) -> Result<usize> {
1544    if dims.len() != vals.len() {
1545        return Err(anyhow::anyhow!(
1546            "column_major_offset: dims.len() != vals.len()"
1547        ));
1548    }
1549    checked_total_size(dims)?;
1550
1551    let mut offset = 0usize;
1552    let mut stride = 1usize;
1553    for (k, (&v, &d)) in vals.iter().zip(dims.iter()).enumerate() {
1554        if d == 0 {
1555            return Err(anyhow::anyhow!("invalid dimension 0 at position {}", k));
1556        }
1557        if v >= d {
1558            return Err(anyhow::anyhow!(
1559                "column_major_offset: value {} at position {} is >= dimension {}",
1560                v,
1561                k,
1562                d
1563            ));
1564        }
1565        let term = v
1566            .checked_mul(stride)
1567            .ok_or_else(|| anyhow::anyhow!("column_major_offset: overflow"))?;
1568        offset = offset
1569            .checked_add(term)
1570            .ok_or_else(|| anyhow::anyhow!("column_major_offset: overflow"))?;
1571        stride = stride
1572            .checked_mul(d)
1573            .ok_or_else(|| anyhow::anyhow!("column_major_offset: overflow"))?;
1574    }
1575    Ok(offset)
1576}
1577
1578// ============================================================================
1579// High-level API for tensor construction (avoids direct Storage access)
1580// ============================================================================
1581
1582impl TensorDynLen {
1583    fn validate_dense_payload_len(data_len: usize, dims: &[usize]) -> Result<()> {
1584        let expected_len = checked_total_size(dims)?;
1585        anyhow::ensure!(
1586            data_len == expected_len,
1587            "dense payload length {} does not match dims {:?} (expected {})",
1588            data_len,
1589            dims,
1590            expected_len
1591        );
1592        Ok(())
1593    }
1594
1595    fn validate_diag_payload_len(data_len: usize, dims: &[usize]) -> Result<()> {
1596        anyhow::ensure!(
1597            !dims.is_empty(),
1598            "diagonal tensor construction requires at least one index"
1599        );
1600        Self::validate_diag_dims(dims)?;
1601        anyhow::ensure!(
1602            data_len == dims[0],
1603            "diagonal payload length {} does not match diagonal dimension {}",
1604            data_len,
1605            dims[0]
1606        );
1607        Ok(())
1608    }
1609
    /// Create a tensor from dense data with explicit indices.
    ///
    /// This is the recommended high-level API for creating tensors from raw data.
    /// It avoids direct access to `Storage` internals.
    ///
    /// # Type Parameters
    /// * `T` - Scalar type (`f64` or `Complex64`)
    ///
    /// # Arguments
    /// * `indices` - Vector of indices for the tensor
    /// * `data` - Tensor data in column-major order
    ///
    /// # Errors
    /// Returns an error if `data.len()` doesn't match the product of index
    /// dimensions, or if that product overflows / contains a zero dimension.
    /// (A mismatch is an `Err`, not a panic — see `validate_dense_payload_len`.)
    ///
    /// NOTE(review): `validate_indices` may panic on malformed index sets —
    /// its behavior is defined elsewhere in this file; confirm before relying
    /// on error-only failure modes.
    ///
    /// # Example
    /// ```
    /// use tensor4all_core::TensorDynLen;
    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
    ///
    /// let i = Index::new_dyn(2);
    /// let j = Index::new_dyn(3);
    /// let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
    /// let tensor: TensorDynLen = TensorDynLen::from_dense(vec![i, j], data).unwrap();
    /// assert_eq!(tensor.dims(), vec![2, 3]);
    /// ```
    pub fn from_dense<T: TensorElement>(indices: Vec<DynIndex>, data: Vec<T>) -> Result<Self> {
        // Dims come from the index metadata; the payload must match exactly.
        let dims = Self::expected_dims_from_indices(&indices);
        Self::validate_indices(&indices);
        Self::validate_dense_payload_len(data.len(), &dims)?;
        // Bridge the column-major buffer into the native tenferro tensor.
        let native = dense_native_tensor_from_col_major(&data, &dims)?;
        Self::from_native(indices, native)
    }
1643
1644    /// Create a diagonal tensor from diagonal payload data with explicit indices.
1645    ///
1646    /// The public native bridge currently materializes diagonal payloads densely, so
1647    /// the returned tensor is mathematically diagonal but may not report
1648    /// [`TensorDynLen::is_diag`] at the native-storage level.
1649    pub fn from_diag<T: TensorElement>(indices: Vec<DynIndex>, data: Vec<T>) -> Result<Self> {
1650        let dims = Self::expected_dims_from_indices(&indices);
1651        Self::validate_indices(&indices);
1652        Self::validate_diag_payload_len(data.len(), &dims)?;
1653        let native = diag_native_tensor_from_col_major(&data, dims.len())?;
1654        Self::from_native(indices, native)
1655    }
1656
1657    /// Create a scalar (0-dimensional) tensor from a supported element value.
1658    ///
1659    /// # Example
1660    /// ```
1661    /// use tensor4all_core::TensorDynLen;
1662    ///
1663    /// let scalar = TensorDynLen::scalar(42.0).unwrap();
1664    /// assert_eq!(scalar.dims(), Vec::<usize>::new());
1665    /// assert_eq!(scalar.only().real(), 42.0);
1666    /// ```
1667    pub fn scalar<T: TensorElement>(value: T) -> Result<Self> {
1668        Self::from_dense(vec![], vec![value])
1669    }
1670
1671    /// Create a tensor filled with zeros of a supported element type.
1672    ///
1673    /// # Example
1674    /// ```
1675    /// use tensor4all_core::TensorDynLen;
1676    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
1677    ///
1678    /// let i = Index::new_dyn(2);
1679    /// let j = Index::new_dyn(3);
1680    /// let tensor = TensorDynLen::zeros::<f64>(vec![i, j]).unwrap();
1681    /// assert_eq!(tensor.dims(), vec![2, 3]);
1682    /// ```
1683    pub fn zeros<T: TensorElement + Zero + Clone>(indices: Vec<DynIndex>) -> Result<Self> {
1684        let dims: Vec<usize> = indices.iter().map(|idx| idx.dim()).collect();
1685        let size: usize = dims.iter().product();
1686        Self::from_dense(indices, vec![T::zero(); size])
1687    }
1688}
1689
1690// ============================================================================
1691// High-level API for data extraction (avoids direct .storage() access)
1692// ============================================================================
1693
1694impl TensorDynLen {
1695    /// Extract tensor data as a column-major `Vec<T>`.
1696    ///
1697    /// # Type Parameters
1698    /// * `T` - The scalar element type (`f64` or `Complex64`).
1699    ///
1700    /// # Returns
1701    /// A vector of the tensor data in column-major order.
1702    ///
1703    /// # Errors
1704    /// Returns an error if the tensor's scalar type does not match `T`.
1705    ///
1706    /// # Example
1707    /// ```
1708    /// use tensor4all_core::TensorDynLen;
1709    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
1710    ///
1711    /// let i = Index::new_dyn(2);
1712    /// let tensor = TensorDynLen::from_dense(vec![i], vec![1.0, 2.0]).unwrap();
1713    /// let data = tensor.to_vec::<f64>().unwrap();
1714    /// assert_eq!(data, &[1.0, 2.0]);
1715    /// ```
1716    pub fn to_vec<T: TensorElement>(&self) -> Result<Vec<T>> {
1717        native_tensor_primal_to_dense_col_major(&self.native)
1718    }
1719
1720    /// Extract tensor data as a column-major `Vec<f64>`.
1721    ///
1722    /// Prefer the generic [`to_vec::<f64>()`](Self::to_vec) method.
1723    /// This wrapper is kept for C API compatibility.
1724    pub fn as_slice_f64(&self) -> Result<Vec<f64>> {
1725        self.to_vec::<f64>()
1726    }
1727
1728    /// Extract tensor data as a column-major `Vec<Complex64>`.
1729    ///
1730    /// Prefer the generic [`to_vec::<Complex64>()`](Self::to_vec) method.
1731    /// This wrapper is kept for C API compatibility.
1732    pub fn as_slice_c64(&self) -> Result<Vec<Complex64>> {
1733        self.to_vec::<Complex64>()
1734    }
1735
1736    /// Check if the tensor has f64 storage.
1737    ///
1738    /// # Example
1739    /// ```
1740    /// use tensor4all_core::TensorDynLen;
1741    /// use tensor4all_core::index::{DefaultIndex as Index, DynId};
1742    ///
1743    /// let i = Index::new_dyn(2);
1744    /// let tensor = TensorDynLen::from_dense(vec![i], vec![1.0, 2.0]).unwrap();
1745    /// assert!(tensor.is_f64());
1746    /// assert!(!tensor.is_complex());
1747    /// ```
1748    pub fn is_f64(&self) -> bool {
1749        self.native.scalar_type() == NativeScalarType::F64
1750    }
1751
1752    /// Check whether the tensor currently uses native diagonal structured storage.
1753    ///
1754    /// This is a storage-level predicate, not a semantic diagonality check.
1755    pub fn is_diag(&self) -> bool {
1756        self.native.is_diag()
1757    }
1758
1759    /// Check if the tensor has complex storage (C64).
1760    pub fn is_complex(&self) -> bool {
1761        self.native.scalar_type() == NativeScalarType::C64
1762    }
1763}