pub fn matrix_exp_frule<T, C>(
ctx: &mut C,
tensor: &Tensor<T>,
tangent: &Tensor<T>,
) -> AdResult<(Tensor<T>, Tensor<T>)> where
T: KernelLinalgScalar + ScaleTensorByRealSameShape<C> + MatrixExpAbsTensor<C>,
C: TensorLinalgContextFor<T> + TensorScalarContextFor<Standard<T>> + TensorScalarContextFor<Standard<T::Real>> + TensorSemiringContextFor<Standard<T>>,
T::Real: KernelLinalgScalar<Real = T::Real> + Float,
<C as TensorScalarContextFor<Standard<T::Real>>>::ScalarBackend: TensorAnalyticPrims<Standard<T::Real>, Context = C>,
C::Backend: 'static,
Forward-mode AD rule for matrix exponential (JVP / pushforward).
Computes exp(A) and the Fréchet derivative d(exp(A)) in the direction dA.
Uses the auxiliary 2n×2n block matrix trick (the PyTorch approach):
M = [[A, dA], [0, A]]
exp(A) = top-left n×n block of exp(M)
d(exp(A)) = top-right n×n block of exp(M)

§ Examples
use tenferro_linalg::matrix_exp_frule;
use tenferro_prims::CpuContext;
use tenferro_tensor::{Tensor, MemoryOrder};
use tenferro_device::LogicalMemorySpace;
let col = MemoryOrder::ColumnMajor;
let mem = LogicalMemorySpace::MainMemory;
let mut ctx = CpuContext::new(1);
let a = Tensor::<f64>::zeros(&[3, 3], mem, col).unwrap();
let da = Tensor::<f64>::ones(&[3, 3], mem, col).unwrap();
let (exp_a, dexp_a) = matrix_exp_frule(&mut ctx, &a, &da).unwrap();