pub fn einsum_with_plan_into<T: Scalar + HasAlgebra>(
    tree: &ContractionTree,
    operands: &[&Tensor<T>],
    alpha: T,
    beta: T,
    output: &mut Tensor<T>,
) -> Result<()>
Execute einsum with a pre-optimized ContractionTree, accumulating
into an existing output.
Computes output = alpha * einsum(operands) + beta * output.
Because both subscript parsing and contraction-order optimization are done when the plan is built, this is the fastest variant for hot loops with pre-allocated output buffers.
§Examples
use tenferro_einsum::{einsum_with_plan_into, ContractionTree, Subscripts};
use tenferro_tensor::{Tensor, MemoryOrder};
use tenferro_device::LogicalMemorySpace;

let col = MemoryOrder::ColumnMajor;

// Plan the contraction ij,jk->ik once, up front.
let subs = Subscripts::new(&[&[0, 1], &[1, 2]], &[0, 2]);
let tree = ContractionTree::optimize(&subs, &[&[3, 4], &[4, 5]]).unwrap();

let a = Tensor::<f64>::zeros(&[3, 4], LogicalMemorySpace::MainMemory, col);
let b = Tensor::<f64>::zeros(&[4, 5], LogicalMemorySpace::MainMemory, col);
let mut c = Tensor::<f64>::zeros(&[3, 5], LogicalMemorySpace::MainMemory, col);

// Hot loop: reuse the output buffer, no allocation per iteration.
for _ in 0..1000 {
    einsum_with_plan_into(&tree, &[&a, &b], 1.0, 0.0, &mut c).unwrap();
}
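With a nonzero beta the call accumulates into the existing contents of output instead of overwriting them. A minimal sketch, reusing tree, a, b, and c from above:

// Accumulate: c = 2.0 * einsum(a, b) + 1.0 * c
einsum_with_plan_into(&tree, &[&a, &b], 2.0, 1.0, &mut c).unwrap();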
§Errors
Returns an error if the operand shapes do not match those used to build the contraction tree, or if the output shape is incorrect.
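For example, passing an output whose shape differs from the planned [3, 5] fails cleanly rather than panicking (a sketch continuing the example above; the tensor name wrong is illustrative):

// Output shape [3, 6] does not match the plan's [3, 5], so this returns Err.
let mut wrong = Tensor::<f64>::zeros(&[3, 6], LogicalMemorySpace::MainMemory, col);
assert!(einsum_with_plan_into(&tree, &[&a, &b], 1.0, 0.0, &mut wrong).is_err());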