pub trait TensorExec {
Show 50 methods
// Required methods
fn add(&mut self, lhs: &Tensor, rhs: &Tensor) -> Result<Tensor>;
fn mul(&mut self, lhs: &Tensor, rhs: &Tensor) -> Result<Tensor>;
fn neg(&mut self, input: &Tensor) -> Result<Tensor>;
fn conj(&mut self, input: &Tensor) -> Result<Tensor>;
fn div(&mut self, lhs: &Tensor, rhs: &Tensor) -> Result<Tensor>;
fn abs(&mut self, input: &Tensor) -> Result<Tensor>;
fn sign(&mut self, input: &Tensor) -> Result<Tensor>;
fn maximum(&mut self, lhs: &Tensor, rhs: &Tensor) -> Result<Tensor>;
fn minimum(&mut self, lhs: &Tensor, rhs: &Tensor) -> Result<Tensor>;
fn compare(
&mut self,
lhs: &Tensor,
rhs: &Tensor,
dir: &CompareDir,
) -> Result<Tensor>;
fn select(
&mut self,
pred: &Tensor,
on_true: &Tensor,
on_false: &Tensor,
) -> Result<Tensor>;
fn clamp(
&mut self,
input: &Tensor,
lower: &Tensor,
upper: &Tensor,
) -> Result<Tensor>;
fn exp(&mut self, input: &Tensor) -> Result<Tensor>;
fn log(&mut self, input: &Tensor) -> Result<Tensor>;
fn sin(&mut self, input: &Tensor) -> Result<Tensor>;
fn cos(&mut self, input: &Tensor) -> Result<Tensor>;
fn tanh(&mut self, input: &Tensor) -> Result<Tensor>;
fn sqrt(&mut self, input: &Tensor) -> Result<Tensor>;
fn rsqrt(&mut self, input: &Tensor) -> Result<Tensor>;
fn pow(&mut self, lhs: &Tensor, rhs: &Tensor) -> Result<Tensor>;
fn expm1(&mut self, input: &Tensor) -> Result<Tensor>;
fn log1p(&mut self, input: &Tensor) -> Result<Tensor>;
fn transpose(&mut self, input: &Tensor, perm: &[usize]) -> Result<Tensor>;
fn reshape(&mut self, input: &Tensor, shape: &[usize]) -> Result<Tensor>;
fn broadcast_in_dim(
&mut self,
input: &Tensor,
shape: &[usize],
dims: &[usize],
) -> Result<Tensor>;
fn convert(&mut self, input: &Tensor, to: DType) -> Result<Tensor>;
fn extract_diagonal(
&mut self,
input: &Tensor,
axis_a: usize,
axis_b: usize,
) -> Result<Tensor>;
fn embed_diagonal(
&mut self,
input: &Tensor,
axis_a: usize,
axis_b: usize,
) -> Result<Tensor>;
fn tril(&mut self, input: &Tensor, k: i64) -> Result<Tensor>;
fn triu(&mut self, input: &Tensor, k: i64) -> Result<Tensor>;
fn reduce_sum(&mut self, input: &Tensor, axes: &[usize]) -> Result<Tensor>;
fn reduce_prod(&mut self, input: &Tensor, axes: &[usize]) -> Result<Tensor>;
fn reduce_max(&mut self, input: &Tensor, axes: &[usize]) -> Result<Tensor>;
fn reduce_min(&mut self, input: &Tensor, axes: &[usize]) -> Result<Tensor>;
fn dot_general(
&mut self,
lhs: &Tensor,
rhs: &Tensor,
config: &DotGeneralConfig,
) -> Result<Tensor>;
fn gather(
&mut self,
operand: &Tensor,
start_indices: &Tensor,
config: &GatherConfig,
) -> Result<Tensor>;
fn scatter(
&mut self,
operand: &Tensor,
scatter_indices: &Tensor,
updates: &Tensor,
config: &ScatterConfig,
) -> Result<Tensor>;
fn slice(&mut self, input: &Tensor, config: &SliceConfig) -> Result<Tensor>;
fn dynamic_slice(
&mut self,
input: &Tensor,
starts: &Tensor,
slice_sizes: &[usize],
) -> Result<Tensor>;
fn pad(&mut self, input: &Tensor, config: &PadConfig) -> Result<Tensor>;
fn concatenate(&mut self, inputs: &[&Tensor], axis: usize) -> Result<Tensor>;
fn reverse(&mut self, input: &Tensor, axes: &[usize]) -> Result<Tensor>;
fn cholesky(&mut self, input: &Tensor) -> Result<Tensor>;
fn triangular_solve(
&mut self,
a: &Tensor,
b: &Tensor,
left_side: bool,
lower: bool,
transpose_a: bool,
unit_diagonal: bool,
) -> Result<Tensor>;
fn lu(&mut self, input: &Tensor) -> Result<Vec<Tensor>>;
fn svd(&mut self, input: &Tensor) -> Result<Vec<Tensor>>;
fn qr(&mut self, input: &Tensor) -> Result<Vec<Tensor>>;
fn eigh(&mut self, input: &Tensor) -> Result<Vec<Tensor>>;
fn eig(&mut self, input: &Tensor) -> Result<Vec<Tensor>>;
fn reclaim_buffer(&mut self, tensor: Tensor);
}Expand description
Execution session surface for dense tensor backends.
All operations run within a backend-owned execution scope such as a CPU rayon pool or a GPU stream. Individual ops must not try to re-enter that scope.
## Examples

```rust
use tenferro_tensor::{cpu::CpuBackend, Tensor, TensorBackend, TypedTensor};

let mut backend = CpuBackend::new();
let a = Tensor::F64(TypedTensor::from_vec(vec![2], vec![1.0, 2.0]));
let b = Tensor::F64(TypedTensor::from_vec(vec![2], vec![3.0, 4.0]));
let sum = backend
    .with_exec_session(|exec| exec.add(&a, &b))
    .unwrap();
assert_eq!(sum.shape(), &[2]);
```