pub fn det_rrule<T, C>(
ctx: &mut C,
tensor: &Tensor<T>,
cotangent: &Tensor<T>,
) -> AdResult<Tensor<T>>
where
T: KernelLinalgScalar + ScaleTensorByRealSameShape<C> + Conjugate,
C: TensorLinalgContextFor<T> + TensorScalarContextFor<Standard<T>> + TensorScalarContextFor<Standard<T::Real>> + TensorMetadataContextFor + TensorResolveConjContextFor<T>,
C::Backend: 'static,
C::MetadataBackend: TensorMetadataPrims<Context = C>,
<C as TensorScalarContextFor<Standard<T>>>::ScalarBackend: TensorMetadataCastPrims<T, Context = C>,
<C as TensorScalarContextFor<Standard<T::Real>>>::ScalarBackend: TensorMetadataCastPrims<T::Real, Context = C>,
Reverse-mode AD rule for determinant (VJP / pullback).
Ā = det(A) · cotangent · A⁻ᵀ.
# Examples
use tenferro_linalg::det_rrule;
use tenferro_prims::CpuContext;
use tenferro_tensor::{MemoryOrder, Tensor};
use tenferro_device::LogicalMemorySpace;

// Shared layout settings for every tensor in this example.
let order = MemoryOrder::ColumnMajor;
let space = LogicalMemorySpace::MainMemory;
let mut ctx = CpuContext::new(1);

// A = I (3×3 identity); cotangent is a scalar (rank-0) tensor of ones.
let a = Tensor::<f64>::eye(3, space, order).unwrap();
let cotangent = Tensor::<f64>::ones(&[], space, order).unwrap();

// Pullback of det: per the rule above, Ā = det(I) · 1 · I⁻ᵀ = I.
let grad_a = det_rrule(&mut ctx, &a, &cotangent).unwrap();