pub fn eager_einsum(
ctx: &mut impl TensorBackend,
inputs: &[&Tensor],
subscripts: &str,
) -> Result<Tensor>
Eager N-ary einsum on concrete Tensor values.
This applies the same contraction-tree optimization strategy used by the traced einsum path, but executes each contraction immediately against the provided backend context.
§Examples
use tenferro_einsum::eager_einsum;
use tenferro_tensor::{Tensor, TensorBackend, cpu::CpuBackend};
let mut ctx = CpuBackend::new();
let a = Tensor::from_vec(vec![2, 3], vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0]);
let b = Tensor::from_vec(vec![3, 2], vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0]);
let c = eager_einsum(&mut ctx, &[&a, &b], "ij,jk->ik").unwrap();
assert_eq!(c.shape(), &[2, 2]);
assert_eq!(c.as_slice::<f64>().unwrap(), &[22.0, 28.0, 49.0, 64.0]);