```rust
pub fn matrix_exp_rrule<T, C>(
    ctx: &mut C,
    tensor: &Tensor<T>,
    cotangent: &Tensor<T>,
) -> AdResult<Tensor<T>>
where
    T: KernelLinalgScalar + Conjugate + ScaleTensorByRealSameShape<C> + MatrixExpAbsTensor<C>,
    C: TensorLinalgContextFor<T>
        + TensorScalarContextFor<Standard<T>>
        + TensorScalarContextFor<Standard<T::Real>>
        + TensorSemiringContextFor<Standard<T>>
        + TensorResolveConjContextFor<T>,
    T::Real: KernelLinalgScalar<Real = T::Real> + Float,
    <C as TensorScalarContextFor<Standard<T::Real>>>::ScalarBackend:
        TensorAnalyticPrims<Standard<T::Real>, Context = C>,
    C::Backend: 'static,
```
Reverse-mode AD rule for matrix exponential (VJP / pullback).
Given a cotangent for exp(A), computes the gradient with respect to the input A.
Uses the auxiliary 2n × 2n block-matrix trick (the same approach as PyTorch), justified by the worked identity below:

M = [[A^T, cotangent], [0, A^T]]
grad_A = top-right n × n block of exp(M)
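The trick rests on a standard identity for the exponential of a block upper-triangular matrix, where L(X, E) denotes the Fréchet derivative of exp at X in direction E:

$$
\exp\begin{pmatrix} X & E \\ 0 & X \end{pmatrix}
= \begin{pmatrix} \exp(X) & L(X, E) \\ 0 & \exp(X) \end{pmatrix}
$$

For real matrices, the adjoint of E ↦ L(A, E) under the trace inner product is E ↦ L(A^T, E), so setting X = A^T and E = cotangent makes the top-right block of exp(M) exactly the requested VJP.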
§Examples

```rust
use tenferro_linalg::matrix_exp_rrule;
use tenferro_prims::CpuContext;
use tenferro_tensor::{Tensor, MemoryOrder};
use tenferro_device::LogicalMemorySpace;

let col = MemoryOrder::ColumnMajor;
let mem = LogicalMemorySpace::MainMemory;
let mut ctx = CpuContext::new(1);

// Primal input A = 0 and an all-ones cotangent for exp(A).
let a = Tensor::<f64>::zeros(&[3, 3], mem, col).unwrap();
let cotangent = Tensor::<f64>::ones(&[3, 3], mem, col).unwrap();

// At A = 0 the Fréchet derivative of exp is the identity map,
// so the returned gradient equals the cotangent.
let grad_a = matrix_exp_rrule(&mut ctx, &a, &cotangent).unwrap();
```
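To make the block trick concrete independently of the tenferro crates, here is a minimal, dependency-free sketch on flat row-major `f64` buffers. The names (`matmul`, `matexp_taylor`, `matrix_exp_vjp`) are illustrative, not part of this crate's API, and the truncated Taylor series stands in for a proper scaling-and-squaring exponential, so it is only adequate for small-norm matrices:

```rust
/// C = A * B for n x n row-major matrices.
fn matmul(a: &[f64], b: &[f64], n: usize) -> Vec<f64> {
    let mut c = vec![0.0; n * n];
    for i in 0..n {
        for k in 0..n {
            let aik = a[i * n + k];
            for j in 0..n {
                c[i * n + j] += aik * b[k * n + j];
            }
        }
    }
    c
}

/// exp(A) via a truncated Taylor series (stand-in for scaling and squaring).
fn matexp_taylor(a: &[f64], n: usize, terms: usize) -> Vec<f64> {
    let mut result = vec![0.0; n * n];
    let mut term = vec![0.0; n * n];
    for i in 0..n {
        result[i * n + i] = 1.0; // start from the identity matrix
        term[i * n + i] = 1.0;
    }
    for k in 1..=terms {
        term = matmul(&term, a, n); // term = A^k / k!
        for x in term.iter_mut() {
            *x /= k as f64;
        }
        for (r, t) in result.iter_mut().zip(term.iter()) {
            *r += t;
        }
    }
    result
}

/// VJP of the matrix exponential via the 2n x 2n block trick:
/// grad = top-right n x n block of exp([[A^T, G], [0, A^T]]).
fn matrix_exp_vjp(a: &[f64], g: &[f64], n: usize) -> Vec<f64> {
    let m = 2 * n;
    let mut big = vec![0.0; m * m];
    for i in 0..n {
        for j in 0..n {
            big[i * m + j] = a[j * n + i];             // top-left: A^T
            big[i * m + (n + j)] = g[i * n + j];       // top-right: cotangent G
            big[(n + i) * m + (n + j)] = a[j * n + i]; // bottom-right: A^T
        }
    }
    let e = matexp_taylor(&big, m, 30);
    let mut grad = vec![0.0; n * n];
    for i in 0..n {
        for j in 0..n {
            grad[i * n + j] = e[i * m + (n + j)]; // extract top-right block
        }
    }
    grad
}

fn main() {
    // At A = 0 the Fréchet derivative of exp is the identity map,
    // so the VJP must return the cotangent unchanged.
    let n = 3;
    let a = vec![0.0; n * n];
    let g = vec![1.0; n * n];
    let grad = matrix_exp_vjp(&a, &g, n);
    assert!(grad.iter().all(|&x| (x - 1.0).abs() < 1e-12));
    println!("grad at A = 0 equals the cotangent: {:?}", grad);
}
```

A production implementation would replace `matexp_taylor` with a robust exponential (scaling and squaring with Padé approximants) and, for complex scalars, use the conjugate transpose A^H in place of A^T.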