pub fn lstsq_frule<T, C>(
ctx: &mut C,
a: &Tensor<T>,
b: &Tensor<T>,
tangent_a: &Tensor<T>,
tangent_b: &Tensor<T>,
) -> AdResult<(LstsqResult<T, T::Real>, LstsqResult<T, T::Real>)>where
T: KernelLinalgScalar + KernelLinalgScalar<Real = T> + Float + Conjugate + ScaleTensorByRealSameShape<C>,
T::Real: LinalgScalar<Real = T::Real> + Float + KeepCountScalar,
C: TensorLinalgContextFor<T> + TensorResolveConjContextFor<T> + TensorScalarContextFor<Standard<T>> + TensorSemiringContextFor<Standard<T>> + TensorScalarContextFor<Standard<T::Real>>,
C::Backend: 'static,
Forward-mode AD rule for least squares (JVP / pushforward).
# Examples
// Example: forward-mode AD (JVP / pushforward) for least squares.
// Solves the primal least-squares problem for (a, b) and simultaneously
// pushes the tangents (da, db) forward through the solve.
use tenferro_linalg::lstsq_frule;
use tenferro_prims::CpuContext;
use tenferro_tensor::{Tensor, MemoryOrder};
use tenferro_device::LogicalMemorySpace;

let order = MemoryOrder::ColumnMajor;
let space = LogicalMemorySpace::MainMemory;
let mut ctx = CpuContext::new(1);

// 3x2 system matrix A and length-3 right-hand side b, column-major.
let a = Tensor::from_slice(&[1.0, 0.0, 1.0, 0.0, 1.0, 1.0], &[3, 2], order).unwrap();
let b = Tensor::from_slice(&[1.0, 2.0, 3.0], &[3], order).unwrap();

// Tangent (perturbation) directions for A and b — all-ones here.
let da = Tensor::<f64>::ones(&[3, 2], space, order).unwrap();
let db = Tensor::<f64>::ones(&[3], space, order).unwrap();

// `result` is the primal least-squares output; `dresult` is its
// directional derivative along (da, db).
let (result, dresult) = lstsq_frule(&mut ctx, &a, &b, &da, &db).unwrap();