pub fn lu<T, C>(
ctx: &mut C,
tensor: &Tensor<T>,
pivot: LuPivot,
) -> Result<LuResult<T>>
where
C: TensorLinalgContextFor<T> + TensorMetadataContextFor + TensorScalarContextFor<Standard<T::Real>>,
C::MetadataBackend: TensorMetadataPrims<Context = C>,
<C as TensorScalarContextFor<Standard<T::Real>>>::ScalarBackend: TensorMetadataCastPrims<T::Real, Context = C>,
T: LiftPermutationMatrixTensor<C> + KernelLinalgScalar,
C::Backend: 'static,
Compute the LU decomposition of a batched matrix.
Input shape: (m, n, *).
The function internally normalizes input to column-major contiguous layout. If the input is not already contiguous, an internal copy is performed.
§Arguments
tensor — Input tensor of shape (m, n, *)
pivot — Pivoting strategy
§Examples
use tenferro_linalg::{lu, LuPivot};
use tenferro_prims::CpuContext;
use tenferro_tensor::{MemoryOrder, Tensor};
// Create a CPU execution context; the argument is presumably a thread/worker
// count — confirm against the CpuContext::new documentation.
let mut ctx = CpuContext::new(1);
// Build a 2x2 identity matrix from a flat slice, stored column-major
// (the layout lu() normalizes to, per the description above).
let a = Tensor::<f64>::from_slice(&[1.0, 0.0, 0.0, 1.0], &[2, 2], MemoryOrder::ColumnMajor)
    .unwrap();
// LU factorization with partial pivoting; result unused in this example.
let _partial = lu(&mut ctx, &a, LuPivot::Partial).unwrap();
// LU factorization with pivoting disabled.
let no_pivot = lu(&mut ctx, &a, LuPivot::NoPivot).unwrap();
// When pivoting is disabled, the permutation tensor `p` is empty (dims [0]),
// as this assertion demonstrates.
assert_eq!(no_pivot.p.dims(), &[0]);