pub struct EagerTensor<B: TensorBackend = CpuBackend> { /* private fields */ }
Expand description
Eager tensor with reverse-mode autodiff over concrete tensor values.
This executes each primitive immediately and records a lightweight reverse
DAG for backward(). Gradients accumulate across repeated backward()
calls until they are cleared explicitly.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::requires_grad(Tensor::from_vec(vec![3], vec![1.0_f64, 2.0, 3.0]));
let loss = (&x * &x).reduce_sum(&[0]).unwrap();
let _cotangents = loss.backward().unwrap();
let loss = (&x * &x).reduce_sum(&[0]).unwrap();
let _cotangents = loss.backward().unwrap();
assert_eq!(x.grad().unwrap().as_slice::<f64>().unwrap(), &[4.0, 8.0, 12.0]);
x.clear_grad();
assert!(x.grad().is_none());
Implementations§
Source§impl EagerTensor<CpuBackend>
impl EagerTensor<CpuBackend>
Sourcepub fn from_tensor(tensor: Tensor) -> Self
pub fn from_tensor(tensor: Tensor) -> Self
Create an untracked eager tensor on the default CPU backend.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
assert_eq!(x.data().as_slice::<f64>().unwrap(), &[1.0, 2.0]);
assert!(x.grad().is_none());Sourcepub fn requires_grad(tensor: Tensor) -> Self
pub fn requires_grad(tensor: Tensor) -> Self
Create a tracked eager leaf on the default CPU backend.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::requires_grad(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
assert!(x.grad().is_none());Source§impl<B: TensorBackend> EagerTensor<B>
impl<B: TensorBackend> EagerTensor<B>
Sourcepub fn from_tensor_in(tensor: Tensor, ctx: Arc<EagerContext<B>>) -> Self
pub fn from_tensor_in(tensor: Tensor, ctx: Arc<EagerContext<B>>) -> Self
Create an untracked eager tensor inside an existing eager context.
§Examples
use tenferro::{CpuBackend, EagerContext, EagerTensor, Tensor};
let ctx = EagerContext::with_backend(CpuBackend::new());
let x = EagerTensor::from_tensor_in(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]), ctx);
assert_eq!(x.data().as_slice::<f64>().unwrap(), &[1.0, 2.0]);Sourcepub fn requires_grad_in(tensor: Tensor, ctx: Arc<EagerContext<B>>) -> Self
pub fn requires_grad_in(tensor: Tensor, ctx: Arc<EagerContext<B>>) -> Self
Create a tracked eager leaf inside an existing eager context.
§Examples
use tenferro::{CpuBackend, EagerContext, EagerTensor, Tensor};
let ctx = EagerContext::with_backend(CpuBackend::new());
let x = EagerTensor::requires_grad_in(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]), ctx);
assert!(x.grad().is_none());Sourcepub fn detach(&self) -> Self
pub fn detach(&self) -> Self
Detach this tensor from the reverse graph.
The returned tensor keeps the concrete value but no longer contributes gradients to the original graph.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::requires_grad(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let y = x.detach();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 2.0]);
assert!(y.grad().is_none());Sourcepub fn data(&self) -> &Tensor
pub fn data(&self) -> &Tensor
Borrow the concrete tensor value.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![3.0_f64]));
assert_eq!(x.data().as_slice::<f64>().unwrap(), &[3.0]);Sourcepub fn grad(&self) -> Option<Arc<Tensor>>
pub fn grad(&self) -> Option<Arc<Tensor>>
Return the accumulated gradient currently stored for this tensor.
The stored gradient accumulates across repeated backward() calls
until it is cleared explicitly.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::requires_grad(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let loss = x.exp().unwrap().reduce_sum(&[0]).unwrap();
let _cotangents = loss.backward().unwrap();
let grad = x.grad().unwrap();
assert_eq!(grad.shape(), &[2]);Sourcepub fn clear_grad(&self)
pub fn clear_grad(&self)
Clear the accumulated gradient stored for this tensor.
This only affects this tensor’s gradient slot. Other tensors in the same context retain their gradients until they are cleared explicitly or overwritten by later accumulation.
§Examples
use tenferro::{CpuBackend, EagerContext, EagerTensor, Tensor};
let ctx = EagerContext::with_backend(CpuBackend::new());
let x = EagerTensor::requires_grad_in(Tensor::from_vec(vec![3], vec![1.0_f64, 2.0, 3.0]), ctx.clone());
let y = EagerTensor::requires_grad_in(Tensor::from_vec(vec![3], vec![4.0_f64, 5.0, 6.0]), ctx);
let loss = (&x * &y).reduce_sum(&[0]).unwrap();
let _ = loss.backward().unwrap();
x.clear_grad();
assert!(x.grad().is_none());
assert!(y.grad().is_some());Sourcepub fn tracks_grad(&self) -> bool
pub fn tracks_grad(&self) -> bool
Report whether this tensor participates in gradient tracking.
Tracked tensors keep a gradient slot in their eager context; untracked tensors and detached tensors do not.
§Examples
use tenferro::{CpuBackend, EagerContext, EagerTensor, Tensor};
let ctx = EagerContext::with_backend(CpuBackend::new());
let plain = EagerTensor::from_tensor_in(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]), ctx.clone());
let tracked = EagerTensor::requires_grad_in(Tensor::from_vec(vec![2], vec![3.0_f64, 4.0]), ctx.clone());
let detached = tracked.detach();
assert!(!plain.tracks_grad());
assert!(tracked.tracks_grad());
assert!(!detached.tracks_grad());Sourcepub fn backward(
&self,
) -> Result<HashMap<GlobalValKey<StdTensorOp>, Arc<Tensor>>>
pub fn backward( &self, ) -> Result<HashMap<GlobalValKey<StdTensorOp>, Arc<Tensor>>>
Run reverse-mode AD from this scalar output.
Returns the full cotangent map produced by the reverse pass and also
accumulates into grad() for tracked eager tensors reachable from this
output.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::requires_grad(Tensor::from_vec(vec![3], vec![1.0_f64, 2.0, 3.0]));
let loss = (&x + &x).reduce_sum(&[0]).unwrap();
let _cotangents = loss.backward().unwrap();
let loss = (&x + &x).reduce_sum(&[0]).unwrap();
let _cotangents = loss.backward().unwrap();
assert_eq!(x.grad().unwrap().as_slice::<f64>().unwrap(), &[4.0, 4.0, 4.0]);Source§impl<B: TensorBackend> EagerTensor<B>
impl<B: TensorBackend> EagerTensor<B>
Sourcepub fn add(&self, other: &Self) -> Result<Self>
pub fn add(&self, other: &Self) -> Result<Self>
Elementwise addition.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let y = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![3.0_f64, 4.0]));
let z = x.add(&y).unwrap();
assert_eq!(z.data().as_slice::<f64>().unwrap(), &[4.0, 6.0]);Sourcepub fn mul(&self, other: &Self) -> Result<Self>
pub fn mul(&self, other: &Self) -> Result<Self>
Elementwise multiplication.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let y = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![3.0_f64, 4.0]));
let z = x.mul(&y).unwrap();
assert_eq!(z.data().as_slice::<f64>().unwrap(), &[3.0, 8.0]);Sourcepub fn neg(&self) -> Result<Self>
pub fn neg(&self) -> Result<Self>
Negate the tensor.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, -2.0]));
let y = x.neg().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[-1.0, 2.0]);Sourcepub fn exp(&self) -> Result<Self>
pub fn exp(&self) -> Result<Self>
Elementwise exponential.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![0.0_f64]));
let y = x.exp().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0]);Sourcepub fn reduce_sum(&self, axes: &[usize]) -> Result<Self>
pub fn reduce_sum(&self, axes: &[usize]) -> Result<Self>
Reduce sum over the requested axes.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.reduce_sum(&[0, 1]).unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[10.0]);Sourcepub fn dot_general(
&self,
other: &Self,
config: DotGeneralConfig,
) -> Result<Self>
pub fn dot_general( &self, other: &Self, config: DotGeneralConfig, ) -> Result<Self>
Execute a dot-general contraction eagerly.
§Examples
use tenferro::{DotGeneralConfig, EagerTensor, Tensor};
let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 3], vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0]));
let b = EagerTensor::from_tensor(Tensor::from_vec(vec![3, 2], vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0]));
let c = a.dot_general(&b, DotGeneralConfig {
lhs_contracting_dims: vec![1],
rhs_contracting_dims: vec![0],
lhs_batch_dims: vec![],
rhs_batch_dims: vec![],
lhs_rank: 2,
rhs_rank: 2,
}).unwrap();
assert_eq!(c.data().shape(), &[2, 2]);Sourcepub fn transpose(&self, perm: &[usize]) -> Result<Self>
pub fn transpose(&self, perm: &[usize]) -> Result<Self>
Permute tensor axes.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(
vec![2, 3],
vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0],
));
let y = x.transpose(&[1, 0]).unwrap();
assert_eq!(y.data().shape(), &[3, 2]);
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 3.0, 5.0, 2.0, 4.0, 6.0]);Sourcepub fn reshape(&self, shape: &[usize]) -> Result<Self>
pub fn reshape(&self, shape: &[usize]) -> Result<Self>
Reshape without changing element order.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(
vec![2, 3],
vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0],
));
let y = x.reshape(&[6]).unwrap();
assert_eq!(y.data().shape(), &[6]);
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);Sourcepub fn slice(&self, config: SliceConfig) -> Result<Self>
pub fn slice(&self, config: SliceConfig) -> Result<Self>
Slice with explicit start, limit, and stride per axis.
§Examples
use tenferro::{EagerTensor, SliceConfig, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![4], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x
.slice(SliceConfig {
starts: vec![1],
limits: vec![3],
strides: vec![1],
})
.unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[2.0, 3.0]);Sourcepub fn broadcast_in_dim(&self, shape: &[usize], dims: &[usize]) -> Result<Self>
pub fn broadcast_in_dim(&self, shape: &[usize], dims: &[usize]) -> Result<Self>
Broadcast into a larger shape with explicit dimension placement.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![3], vec![1.0_f64, 2.0, 3.0]));
let y = x.broadcast_in_dim(&[3, 2], &[0]).unwrap();
assert_eq!(y.data().shape(), &[3, 2]);Sourcepub fn convert(&self, to: DType) -> Result<Self>
pub fn convert(&self, to: DType) -> Result<Self>
Convert the tensor to a different dtype.
§Examples
use tenferro::{DType, EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, -2.0]));
let y = x.convert(DType::C64).unwrap();
assert_eq!(y.data().dtype(), DType::C64);
assert_eq!(y.data().shape(), &[2]);Sourcepub fn pad(&self, config: PadConfig) -> Result<Self>
pub fn pad(&self, config: PadConfig) -> Result<Self>
Pad with zeros using StableHLO-style edge and interior padding.
§Examples
use tenferro::{EagerTensor, PadConfig, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let y = x
.pad(PadConfig {
edge_padding_low: vec![1],
edge_padding_high: vec![1],
interior_padding: vec![1],
})
.unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.0, 1.0, 0.0, 2.0, 0.0]);Sourcepub fn reverse(&self, axes: &[usize]) -> Result<Self>
pub fn reverse(&self, axes: &[usize]) -> Result<Self>
Reverse the order of elements along the requested axes.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![4], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.reverse(&[0]).unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[4.0, 3.0, 2.0, 1.0]);Sourcepub fn gather(&self, indices: &Self, config: GatherConfig) -> Result<Self>
pub fn gather(&self, indices: &Self, config: GatherConfig) -> Result<Self>
Gather slices from self using integer start indices.
§Examples
use tenferro::{EagerTensor, GatherConfig, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(
vec![5],
vec![10.0_f64, 20.0, 30.0, 40.0, 50.0],
));
let indices = EagerTensor::from_tensor(Tensor::from_vec(vec![3], vec![4.0_f64, 1.0, 0.0]));
let y = x
.gather(
&indices,
GatherConfig {
offset_dims: vec![],
collapsed_slice_dims: vec![0],
start_index_map: vec![0],
index_vector_dim: 1,
slice_sizes: vec![1],
},
)
.unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[50.0, 20.0, 10.0]);Sourcepub fn scatter(
&self,
indices: &Self,
updates: &Self,
config: ScatterConfig,
) -> Result<Self>
pub fn scatter( &self, indices: &Self, updates: &Self, config: ScatterConfig, ) -> Result<Self>
Scatter updates into self using StableHLO scatter semantics.
§Examples
use tenferro::{EagerTensor, ScatterConfig, Tensor};
let operand = EagerTensor::from_tensor(Tensor::from_vec(vec![4], vec![0.0_f64, 0.0, 0.0, 0.0]));
let indices = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 1], vec![1.0_f64, 3.0]));
let updates = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![5.0_f64, 7.0]));
let result = operand
.scatter(
&indices,
&updates,
ScatterConfig {
update_window_dims: vec![],
inserted_window_dims: vec![0],
scatter_dims_to_operand_dims: vec![0],
index_vector_dim: 1,
},
)
.unwrap();
assert_eq!(result.data().as_slice::<f64>().unwrap(), &[0.0, 5.0, 0.0, 7.0]);Sourcepub fn dynamic_slice(&self, starts: &Self, sizes: &[usize]) -> Result<Self>
pub fn dynamic_slice(&self, starts: &Self, sizes: &[usize]) -> Result<Self>
Slice using runtime start indices.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![5], vec![1.0_f64, 2.0, 3.0, 4.0, 5.0]));
let starts = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![2.0_f64]));
let y = x.dynamic_slice(&starts, &[2]).unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[3.0, 4.0]);Sourcepub fn concatenate(tensors: &[&Self], axis: usize) -> Result<Self>
pub fn concatenate(tensors: &[&Self], axis: usize) -> Result<Self>
Concatenate tensors along one axis.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let y = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![3.0_f64, 4.0]));
let z = EagerTensor::concatenate(&[&x, &y], 0).unwrap();
assert_eq!(z.data().as_slice::<f64>().unwrap(), &[1.0, 2.0, 3.0, 4.0]);Sourcepub fn extract_diag(&self, axis_a: usize, axis_b: usize) -> Result<Self>
pub fn extract_diag(&self, axis_a: usize, axis_b: usize) -> Result<Self>
Extract the diagonal along two axes.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(
vec![3, 3],
vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0],
));
let y = x.extract_diag(0, 1).unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 5.0, 9.0]);Sourcepub fn embed_diag(&self, axis_a: usize, axis_b: usize) -> Result<Self>
pub fn embed_diag(&self, axis_a: usize, axis_b: usize) -> Result<Self>
Embed a vector or lower-rank tensor along a diagonal.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![3], vec![1.0_f64, 2.0, 3.0]));
let y = x.embed_diag(0, 1).unwrap();
assert_eq!(y.data().shape(), &[3, 3]);
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 3.0]);Sourcepub fn tril(&self, k: i64) -> Result<Self>
pub fn tril(&self, k: i64) -> Result<Self>
Keep the lower triangle and zero the rest.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.tril(0).unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 2.0, 0.0, 4.0]);Sourcepub fn triu(&self, k: i64) -> Result<Self>
pub fn triu(&self, k: i64) -> Result<Self>
Keep the upper triangle and zero the rest.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.triu(0).unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 0.0, 3.0, 4.0]);Sourcepub fn reduce_prod(&self, axes: &[usize]) -> Result<Self>
pub fn reduce_prod(&self, axes: &[usize]) -> Result<Self>
Reduce product over the requested axes.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.reduce_prod(&[0, 1]).unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[24.0]);Sourcepub fn reduce_max(&self, axes: &[usize]) -> Result<Self>
pub fn reduce_max(&self, axes: &[usize]) -> Result<Self>
Reduce maximum over the requested axes.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.reduce_max(&[0, 1]).unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[4.0]);Sourcepub fn reduce_min(&self, axes: &[usize]) -> Result<Self>
pub fn reduce_min(&self, axes: &[usize]) -> Result<Self>
Reduce minimum over the requested axes.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.reduce_min(&[0, 1]).unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0]);Source§impl<B: TensorBackend> EagerTensor<B>
impl<B: TensorBackend> EagerTensor<B>
Sourcepub fn abs(&self) -> Result<Self>
pub fn abs(&self) -> Result<Self>
Elementwise absolute value.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![-1.0_f64, 2.0]));
let y = x.abs().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 2.0]);Sourcepub fn conj(&self) -> Result<Self>
pub fn conj(&self) -> Result<Self>
Elementwise complex conjugate.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, -2.0]));
let y = x.conj().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, -2.0]);Sourcepub fn sign(&self) -> Result<Self>
pub fn sign(&self) -> Result<Self>
Elementwise sign.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![-2.0_f64, 3.0]));
let y = x.sign().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[-1.0, 1.0]);Sourcepub fn log(&self) -> Result<Self>
pub fn log(&self) -> Result<Self>
Elementwise natural logarithm.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![1.0_f64]));
let y = x.log().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.0]);Sourcepub fn sqrt(&self) -> Result<Self>
pub fn sqrt(&self) -> Result<Self>
Elementwise square root.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![4.0_f64]));
let y = x.sqrt().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[2.0]);Sourcepub fn rsqrt(&self) -> Result<Self>
pub fn rsqrt(&self) -> Result<Self>
Elementwise reciprocal square root.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![4.0_f64]));
let y = x.rsqrt().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.5]);Sourcepub fn sin(&self) -> Result<Self>
pub fn sin(&self) -> Result<Self>
Elementwise sine.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![0.0_f64]));
let y = x.sin().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.0]);Sourcepub fn cos(&self) -> Result<Self>
pub fn cos(&self) -> Result<Self>
Elementwise cosine.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![0.0_f64]));
let y = x.cos().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0]);Sourcepub fn tanh(&self) -> Result<Self>
pub fn tanh(&self) -> Result<Self>
Elementwise hyperbolic tangent.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![0.0_f64]));
let y = x.tanh().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.0]);Sourcepub fn expm1(&self) -> Result<Self>
pub fn expm1(&self) -> Result<Self>
Elementwise exp(x) - 1.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![0.0_f64]));
let y = x.expm1().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.0]);Sourcepub fn log1p(&self) -> Result<Self>
pub fn log1p(&self) -> Result<Self>
Elementwise log(1 + x).
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![0.0_f64]));
let y = x.log1p().unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.0]);Sourcepub fn div(&self, other: &Self) -> Result<Self>
pub fn div(&self, other: &Self) -> Result<Self>
Elementwise division.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![3], vec![8.0_f64, -6.0, 9.0]));
let y = EagerTensor::from_tensor(Tensor::from_vec(vec![3], vec![2.0_f64, 3.0, 3.0]));
let z = x.div(&y).unwrap();
assert_eq!(z.data().as_slice::<f64>().unwrap(), &[4.0, -2.0, 3.0]);Sourcepub fn pow(&self, other: &Self) -> Result<Self>
pub fn pow(&self, other: &Self) -> Result<Self>
Elementwise power.
§Examples
use tenferro::{EagerTensor, Tensor};
let base = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![2.0_f64, 3.0]));
let exp = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![3.0_f64, 2.0]));
let y = base.pow(&exp).unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[8.0, 9.0]);Sourcepub fn maximum(&self, other: &Self) -> Result<Self>
pub fn maximum(&self, other: &Self) -> Result<Self>
Elementwise maximum.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 5.0]));
let y = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![3.0_f64, 4.0]));
let z = x.maximum(&y).unwrap();
assert_eq!(z.data().as_slice::<f64>().unwrap(), &[3.0, 5.0]);Sourcepub fn minimum(&self, other: &Self) -> Result<Self>
pub fn minimum(&self, other: &Self) -> Result<Self>
Elementwise minimum.
§Examples
use tenferro::{EagerTensor, Tensor};
let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 5.0]));
let y = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![3.0_f64, 4.0]));
let z = x.minimum(&y).unwrap();
assert_eq!(z.data().as_slice::<f64>().unwrap(), &[1.0, 4.0]);Sourcepub fn select(condition: &Self, on_true: &Self, on_false: &Self) -> Result<Self>
pub fn select(condition: &Self, on_true: &Self, on_false: &Self) -> Result<Self>
Select values from on_true or on_false using condition.
§Examples
use tenferro::{EagerTensor, Tensor};
let condition = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![0.0_f64, 1.0]));
let on_true = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![10.0_f64, 20.0]));
let on_false = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let y = EagerTensor::select(&condition, &on_true, &on_false).unwrap();
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 20.0]);Source§impl<B: TensorBackend> EagerTensor<B>
impl<B: TensorBackend> EagerTensor<B>
Sourcepub fn svd(&self) -> Result<(Self, Self, Self)>
pub fn svd(&self) -> Result<(Self, Self, Self)>
Singular value decomposition: A = U diag(S) Vh.
§Examples
use tenferro::{EagerTensor, Tensor};
let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 0.0, 0.0, 2.0]));
let (u, s, vh) = a.svd().unwrap();
assert_eq!(u.data().shape(), &[2, 2]);
assert_eq!(s.data().shape(), &[2]);
assert_eq!(vh.data().shape(), &[2, 2]);Sourcepub fn qr(&self) -> Result<(Self, Self)>
pub fn qr(&self) -> Result<(Self, Self)>
QR decomposition: A = Q R.
§Examples
use tenferro::{EagerTensor, Tensor};
let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 0.0, 0.0, 1.0]));
let (q, r) = a.qr().unwrap();
assert_eq!(q.data().shape(), &[2, 2]);
assert_eq!(r.data().shape(), &[2, 2]);Sourcepub fn lu(&self) -> Result<(Self, Self, Self, Self)>
pub fn lu(&self) -> Result<(Self, Self, Self, Self)>
LU decomposition with partial pivoting: P A = L U.
§Examples
use tenferro::{EagerTensor, Tensor};
let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![0.0_f64, 1.0, 1.0, 0.0]));
let (p, l, u, parity) = a.lu().unwrap();
assert_eq!(p.data().shape(), &[2, 2]);
assert_eq!(l.data().shape(), &[2, 2]);
assert_eq!(u.data().shape(), &[2, 2]);
assert_eq!(parity.data().shape(), &[] as &[usize]);Sourcepub fn cholesky(&self) -> Result<Self>
pub fn cholesky(&self) -> Result<Self>
Cholesky factorization: A = L L^T for real inputs.
§Examples
use tenferro::{EagerTensor, Tensor};
let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 0.0, 0.0, 1.0]));
let l = a.cholesky().unwrap();
assert_eq!(l.data().shape(), &[2, 2]);
assert_eq!(l.data().as_slice::<f64>().unwrap(), &[1.0, 0.0, 0.0, 1.0]);Sourcepub fn eigh(&self) -> Result<(Self, Self)>
pub fn eigh(&self) -> Result<(Self, Self)>
Symmetric or Hermitian eigendecomposition: A = V diag(W) V^T.
§Examples
use tenferro::{EagerTensor, Tensor};
let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 0.0, 0.0, 3.0]));
let (values, vectors) = a.eigh().unwrap();
assert_eq!(values.data().shape(), &[2]);
assert_eq!(vectors.data().shape(), &[2, 2]);Sourcepub fn eig(&self) -> Result<(Self, Self)>
pub fn eig(&self) -> Result<(Self, Self)>
General eigendecomposition.
§Examples
use tenferro::{EagerTensor, Tensor};
let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 0.0, 0.0, 3.0]));
let (values, vectors) = a.eig().unwrap();
assert_eq!(values.data().shape(), &[2]);
assert_eq!(vectors.data().shape(), &[2, 2]);Sourcepub fn triangular_solve(
&self,
b: &Self,
left_side: bool,
lower: bool,
transpose_a: bool,
unit_diagonal: bool,
) -> Result<Self>
pub fn triangular_solve( &self, b: &Self, left_side: bool, lower: bool, transpose_a: bool, unit_diagonal: bool, ) -> Result<Self>
Solve a triangular linear system.
§Examples
use tenferro::{EagerTensor, Tensor};
let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![2.0_f64, 0.0, 0.0, 4.0]));
let b = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 1], vec![4.0_f64, 8.0]));
let x = a
.triangular_solve(&b, true, true, false, false)
.unwrap();
assert_eq!(x.data().shape(), &[2, 1]);
assert_eq!(x.data().as_slice::<f64>().unwrap(), &[2.0, 2.0]);
Trait Implementations§
Source§impl<B: TensorBackend> Add for &EagerTensor<B>
impl<B: TensorBackend> Add for &EagerTensor<B>
Source§type Output = EagerTensor<B>
type Output = EagerTensor<B>
Performs the + operation.
Source§impl<B: Clone + TensorBackend> Clone for EagerTensor<B>
impl<B: Clone + TensorBackend> Clone for EagerTensor<B>
Source§fn clone(&self) -> EagerTensor<B>
fn clone(&self) -> EagerTensor<B>
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Source§impl<B: TensorBackend> Mul for &EagerTensor<B>
impl<B: TensorBackend> Mul for &EagerTensor<B>
Source§type Output = EagerTensor<B>
type Output = EagerTensor<B>
Performs the * operation.
Source§impl<B: TensorBackend> Neg for &EagerTensor<B>
impl<B: TensorBackend> Neg for &EagerTensor<B>
Auto Trait Implementations§
impl<B> Freeze for EagerTensor<B>
impl<B> RefUnwindSafe for EagerTensor<B>
impl<B> Send for EagerTensor<B> where
B: Send,
impl<B> Sync for EagerTensor<B> where
B: Send,
impl<B> Unpin for EagerTensor<B>
impl<B> UnsafeUnpin for EagerTensor<B>
impl<B> UnwindSafe for EagerTensor<B>
Blanket Implementations§
§impl<Rhs, Lhs, Output> AddByRef<Rhs> for Lhs
impl<Rhs, Lhs, Output> AddByRef<Rhs> for Lhs
type Output = Output
fn add_by_ref(&self, rhs: &Rhs) -> <Lhs as AddByRef<Rhs>>::Output
Source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for T where
T: Clone,
impl<T> CloneToUninit for T where
T: Clone,
§impl<T> DistributionExt for T where
T: ?Sized,
impl<T> DistributionExt for T where
T: ?Sized,
fn rand<T>(&self, rng: &mut (impl Rng + ?Sized)) -> T where
Self: Distribution<T>,
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more