pub fn eig_rrule<T, C>(
    ctx: &mut C,
    tensor: &Tensor<T>,
    cotangent: &EigCotangent<T>,
) -> AdResult<Tensor<T>>
where
    T: KernelLinalgScalar + KernelLinalgScalar<Real = T, Complex = Complex<T>> + Float,
    C: TensorLinalgContextFor<T> + TensorLinalgContextFor<Complex<T>> + TensorScalarContextFor<Standard<T::Real>>,
    Complex<T>: KernelLinalgScalar,
    <C as TensorLinalgContextFor<T>>::Backend: 'static,
    <C as TensorLinalgContextFor<Complex<T>>>::Backend: 'static,
    T::Real: KeepCountScalar,
Reverse-mode AD rule for general eigendecomposition (VJP / pullback).
Given the eigendecomposition A V = V diag(lambda), computes the gradient
with respect to the input A from the complex-valued cotangents of the
eigenvalues and eigenvectors, using Mike Giles' matrix-derivative formulas.
The cotangent is an EigCotangent holding complex-valued tensors,
because eig() returns complex output even for real inputs.
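In the commonly cited form of the Giles-style pullback for a diagonalizable A = V diag(lambda) V^{-1} (shown here as a sketch; the exact conjugation conventions for the complex cotangents are internal to this crate):

    grad_A = V^{-H} (diag(lambda_bar) + F ∘ (V^H V_bar)) V^H
    F[i, j] = 1 / (lambda[j] - lambda[i]) for i != j, and F[i, i] = 0

where lambda_bar and V_bar are the eigenvalue and eigenvector cotangents, ∘ is the elementwise product, and ^{-H} denotes the conjugate-transposed inverse. Note that F is singular for repeated eigenvalues, where the eigenvector gradient is not well defined.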
§Examples
use tenferro_linalg::{eig_rrule, EigCotangent};
use tenferro_prims::CpuContext;
use tenferro_tensor::{Tensor, MemoryOrder};
use tenferro_device::LogicalMemorySpace;
use num_complex::Complex64;

let col = MemoryOrder::ColumnMajor;
let mem = LogicalMemorySpace::MainMemory;
let mut ctx = CpuContext::new(1);

// Input matrix whose eigendecomposition is being differentiated.
let a = Tensor::<f64>::zeros(&[3, 3], mem, col).unwrap();

// Cotangents for the eigenvalues and eigenvectors; a None field carries
// no incoming gradient for that output.
let cotangent = EigCotangent::<f64> {
    values: None,
    vectors: None,
};

// Pull the cotangents back to a gradient with respect to `a`.
let grad_a = eig_rrule(&mut ctx, &a, &cotangent).unwrap();
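For a non-trivial pullback, the cotangent fields are populated with complex tensors matching the shapes of eig()'s outputs. A minimal sketch, assuming the fields are Option<Tensor<Complex64>>, that Tensor::<Complex64>::zeros takes the same arguments as the real-valued constructor above, and that mem and col can be reused:

// Hypothetical shapes: eigenvalue cotangent [3], eigenvector cotangent [3, 3].
let values_bar = Tensor::<Complex64>::zeros(&[3], mem, col).unwrap();
let vectors_bar = Tensor::<Complex64>::zeros(&[3, 3], mem, col).unwrap();
let cotangent = EigCotangent::<f64> {
    values: Some(values_bar),
    vectors: Some(vectors_bar),
};
let grad_a = eig_rrule(&mut ctx, &a, &cotangent).unwrap();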