Skip to main content

EagerTensor

Struct EagerTensor 

Source
pub struct EagerTensor<B: TensorBackend = CpuBackend> { /* private fields */ }
Expand description

Eager tensor with reverse-mode autodiff over concrete tensor values.

This executes each primitive immediately and records a lightweight reverse DAG for backward(). Gradients accumulate across repeated backward() calls until they are cleared explicitly.

§Examples

use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::requires_grad(Tensor::from_vec(vec![3], vec![1.0_f64, 2.0, 3.0]));
let loss = (&x * &x).reduce_sum(&[0]).unwrap();
let _cotangents = loss.backward().unwrap();
let loss = (&x * &x).reduce_sum(&[0]).unwrap();
let _cotangents = loss.backward().unwrap();

assert_eq!(x.grad().unwrap().as_slice::<f64>().unwrap(), &[4.0, 8.0, 12.0]);
x.clear_grad();

assert!(x.grad().is_none());

Implementations§

Source§

impl EagerTensor<CpuBackend>

Source

pub fn from_tensor(tensor: Tensor) -> Self

Create an untracked eager tensor on the default CPU backend.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
assert_eq!(x.data().as_slice::<f64>().unwrap(), &[1.0, 2.0]);
assert!(x.grad().is_none());
Source

pub fn requires_grad(tensor: Tensor) -> Self

Create a tracked eager leaf on the default CPU backend.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::requires_grad(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
assert!(x.grad().is_none());
Source§

impl<B: TensorBackend> EagerTensor<B>

Source

pub fn from_tensor_in(tensor: Tensor, ctx: Arc<EagerContext<B>>) -> Self

Create an untracked eager tensor inside an existing eager context.

§Examples
use tenferro::{CpuBackend, EagerContext, EagerTensor, Tensor};

let ctx = EagerContext::with_backend(CpuBackend::new());
let x = EagerTensor::from_tensor_in(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]), ctx);

assert_eq!(x.data().as_slice::<f64>().unwrap(), &[1.0, 2.0]);
Source

pub fn requires_grad_in(tensor: Tensor, ctx: Arc<EagerContext<B>>) -> Self

Create a tracked eager leaf inside an existing eager context.

§Examples
use tenferro::{CpuBackend, EagerContext, EagerTensor, Tensor};

let ctx = EagerContext::with_backend(CpuBackend::new());
let x = EagerTensor::requires_grad_in(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]), ctx);

assert!(x.grad().is_none());
Source

pub fn detach(&self) -> Self

Detach this tensor from the reverse graph.

The returned tensor keeps the concrete value but no longer contributes gradients to the original graph.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::requires_grad(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let y = x.detach();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 2.0]);
assert!(y.grad().is_none());
Source

pub fn data(&self) -> &Tensor

Borrow the concrete tensor value.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![3.0_f64]));
assert_eq!(x.data().as_slice::<f64>().unwrap(), &[3.0]);
Source

pub fn grad(&self) -> Option<Arc<Tensor>>

Return the accumulated gradient currently stored for this tensor.

The stored gradient accumulates across repeated backward() calls until it is cleared explicitly.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::requires_grad(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let loss = x.exp().unwrap().reduce_sum(&[0]).unwrap();
let _cotangents = loss.backward().unwrap();

let grad = x.grad().unwrap();
assert_eq!(grad.shape(), &[2]);
Source

pub fn clear_grad(&self)

Clear the accumulated gradient stored for this tensor.

This only affects this tensor’s gradient slot. Other tensors in the same context retain their gradients until they are cleared explicitly or overwritten by later accumulation.

§Examples
use tenferro::{CpuBackend, EagerContext, EagerTensor, Tensor};

let ctx = EagerContext::with_backend(CpuBackend::new());
let x = EagerTensor::requires_grad_in(Tensor::from_vec(vec![3], vec![1.0_f64, 2.0, 3.0]), ctx.clone());
let y = EagerTensor::requires_grad_in(Tensor::from_vec(vec![3], vec![4.0_f64, 5.0, 6.0]), ctx);
let loss = (&x * &y).reduce_sum(&[0]).unwrap();
let _ = loss.backward().unwrap();

x.clear_grad();

assert!(x.grad().is_none());
assert!(y.grad().is_some());
Source

pub fn tracks_grad(&self) -> bool

Report whether this tensor participates in gradient tracking.

Tracked tensors keep a gradient slot in their eager context; untracked tensors and detached tensors do not.

§Examples
use tenferro::{CpuBackend, EagerContext, EagerTensor, Tensor};

let ctx = EagerContext::with_backend(CpuBackend::new());
let plain = EagerTensor::from_tensor_in(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]), ctx.clone());
let tracked = EagerTensor::requires_grad_in(Tensor::from_vec(vec![2], vec![3.0_f64, 4.0]), ctx.clone());
let detached = tracked.detach();

assert!(!plain.tracks_grad());
assert!(tracked.tracks_grad());
assert!(!detached.tracks_grad());
Source

pub fn backward( &self, ) -> Result<HashMap<GlobalValKey<StdTensorOp>, Arc<Tensor>>>

Run reverse-mode AD from this scalar output.

Returns the full cotangent map produced by the reverse pass and also accumulates into grad() for tracked eager tensors reachable from this output.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::requires_grad(Tensor::from_vec(vec![3], vec![1.0_f64, 2.0, 3.0]));
let loss = (&x + &x).reduce_sum(&[0]).unwrap();
let _cotangents = loss.backward().unwrap();
let loss = (&x + &x).reduce_sum(&[0]).unwrap();
let _cotangents = loss.backward().unwrap();

assert_eq!(x.grad().unwrap().as_slice::<f64>().unwrap(), &[4.0, 4.0, 4.0]);
Source§

impl<B: TensorBackend> EagerTensor<B>

Source

pub fn add(&self, other: &Self) -> Result<Self>

Elementwise addition.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let y = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![3.0_f64, 4.0]));
let z = x.add(&y).unwrap();

assert_eq!(z.data().as_slice::<f64>().unwrap(), &[4.0, 6.0]);
Source

pub fn mul(&self, other: &Self) -> Result<Self>

Elementwise multiplication.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let y = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![3.0_f64, 4.0]));
let z = x.mul(&y).unwrap();

assert_eq!(z.data().as_slice::<f64>().unwrap(), &[3.0, 8.0]);
Source

pub fn neg(&self) -> Result<Self>

Negate the tensor.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, -2.0]));
let y = x.neg().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[-1.0, 2.0]);
Source

pub fn exp(&self) -> Result<Self>

Elementwise exponential.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![0.0_f64]));
let y = x.exp().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0]);
Source

pub fn reduce_sum(&self, axes: &[usize]) -> Result<Self>

Reduce sum over the requested axes.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.reduce_sum(&[0, 1]).unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[10.0]);
Source

pub fn dot_general( &self, other: &Self, config: DotGeneralConfig, ) -> Result<Self>

Execute a dot-general contraction eagerly.

§Examples
use tenferro::{DotGeneralConfig, EagerTensor, Tensor};

let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 3], vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0]));
let b = EagerTensor::from_tensor(Tensor::from_vec(vec![3, 2], vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0]));
let c = a.dot_general(&b, DotGeneralConfig {
    lhs_contracting_dims: vec![1],
    rhs_contracting_dims: vec![0],
    lhs_batch_dims: vec![],
    rhs_batch_dims: vec![],
    lhs_rank: 2,
    rhs_rank: 2,
}).unwrap();

assert_eq!(c.data().shape(), &[2, 2]);
Source

pub fn transpose(&self, perm: &[usize]) -> Result<Self>

Permute tensor axes.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(
    vec![2, 3],
    vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0],
));
let y = x.transpose(&[1, 0]).unwrap();

assert_eq!(y.data().shape(), &[3, 2]);
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 3.0, 5.0, 2.0, 4.0, 6.0]);
Source

pub fn reshape(&self, shape: &[usize]) -> Result<Self>

Reshape without changing element order.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(
    vec![2, 3],
    vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0],
));
let y = x.reshape(&[6]).unwrap();

assert_eq!(y.data().shape(), &[6]);
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
Source

pub fn slice(&self, config: SliceConfig) -> Result<Self>

Slice with explicit start, limit, and stride per axis.

§Examples
use tenferro::{EagerTensor, SliceConfig, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![4], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x
    .slice(SliceConfig {
        starts: vec![1],
        limits: vec![3],
        strides: vec![1],
    })
    .unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[2.0, 3.0]);
Source

pub fn broadcast_in_dim(&self, shape: &[usize], dims: &[usize]) -> Result<Self>

Broadcast into a larger shape with explicit dimension placement.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![3], vec![1.0_f64, 2.0, 3.0]));
let y = x.broadcast_in_dim(&[3, 2], &[0]).unwrap();

assert_eq!(y.data().shape(), &[3, 2]);
Source

pub fn convert(&self, to: DType) -> Result<Self>

Convert the tensor to a different dtype.

§Examples
use tenferro::{DType, EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, -2.0]));
let y = x.convert(DType::C64).unwrap();

assert_eq!(y.data().dtype(), DType::C64);
assert_eq!(y.data().shape(), &[2]);
Source

pub fn pad(&self, config: PadConfig) -> Result<Self>

Pad with zeros using StableHLO-style edge and interior padding.

§Examples
use tenferro::{EagerTensor, PadConfig, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let y = x
    .pad(PadConfig {
        edge_padding_low: vec![1],
        edge_padding_high: vec![1],
        interior_padding: vec![1],
    })
    .unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.0, 1.0, 0.0, 2.0, 0.0]);
Source

pub fn reverse(&self, axes: &[usize]) -> Result<Self>

Reverse the order of elements along the requested axes.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![4], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.reverse(&[0]).unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[4.0, 3.0, 2.0, 1.0]);
Source

pub fn gather(&self, indices: &Self, config: GatherConfig) -> Result<Self>

Gather slices from self using integer start indices.

§Examples
use tenferro::{EagerTensor, GatherConfig, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(
    vec![5],
    vec![10.0_f64, 20.0, 30.0, 40.0, 50.0],
));
let indices = EagerTensor::from_tensor(Tensor::from_vec(vec![3], vec![4.0_f64, 1.0, 0.0]));
let y = x
    .gather(
        &indices,
        GatherConfig {
            offset_dims: vec![],
            collapsed_slice_dims: vec![0],
            start_index_map: vec![0],
            index_vector_dim: 1,
            slice_sizes: vec![1],
        },
    )
    .unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[50.0, 20.0, 10.0]);
Source

pub fn scatter( &self, indices: &Self, updates: &Self, config: ScatterConfig, ) -> Result<Self>

Scatter updates into self using StableHLO scatter semantics.

§Examples
use tenferro::{EagerTensor, ScatterConfig, Tensor};

let operand = EagerTensor::from_tensor(Tensor::from_vec(vec![4], vec![0.0_f64, 0.0, 0.0, 0.0]));
let indices = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 1], vec![1.0_f64, 3.0]));
let updates = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![5.0_f64, 7.0]));
let result = operand
    .scatter(
        &indices,
        &updates,
        ScatterConfig {
            update_window_dims: vec![],
            inserted_window_dims: vec![0],
            scatter_dims_to_operand_dims: vec![0],
            index_vector_dim: 1,
        },
    )
    .unwrap();

assert_eq!(result.data().as_slice::<f64>().unwrap(), &[0.0, 5.0, 0.0, 7.0]);
Source

pub fn dynamic_slice(&self, starts: &Self, sizes: &[usize]) -> Result<Self>

Slice using runtime start indices.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![5], vec![1.0_f64, 2.0, 3.0, 4.0, 5.0]));
let starts = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![2.0_f64]));
let y = x.dynamic_slice(&starts, &[2]).unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[3.0, 4.0]);
Source

pub fn concatenate(tensors: &[&Self], axis: usize) -> Result<Self>

Concatenate tensors along one axis.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let y = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![3.0_f64, 4.0]));
let z = EagerTensor::concatenate(&[&x, &y], 0).unwrap();

assert_eq!(z.data().as_slice::<f64>().unwrap(), &[1.0, 2.0, 3.0, 4.0]);
Source

pub fn extract_diag(&self, axis_a: usize, axis_b: usize) -> Result<Self>

Extract the diagonal along two axes.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(
    vec![3, 3],
    vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0],
));
let y = x.extract_diag(0, 1).unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 5.0, 9.0]);
Source

pub fn embed_diag(&self, axis_a: usize, axis_b: usize) -> Result<Self>

Embed a vector or lower-rank tensor along a diagonal.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![3], vec![1.0_f64, 2.0, 3.0]));
let y = x.embed_diag(0, 1).unwrap();

assert_eq!(y.data().shape(), &[3, 3]);
assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 3.0]);
Source

pub fn tril(&self, k: i64) -> Result<Self>

Keep the lower triangle and zero the rest.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.tril(0).unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 2.0, 0.0, 4.0]);
Source

pub fn triu(&self, k: i64) -> Result<Self>

Keep the upper triangle and zero the rest.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.triu(0).unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 0.0, 3.0, 4.0]);
Source

pub fn reduce_prod(&self, axes: &[usize]) -> Result<Self>

Reduce product over the requested axes.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.reduce_prod(&[0, 1]).unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[24.0]);
Source

pub fn reduce_max(&self, axes: &[usize]) -> Result<Self>

Reduce maximum over the requested axes.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.reduce_max(&[0, 1]).unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[4.0]);
Source

pub fn reduce_min(&self, axes: &[usize]) -> Result<Self>

Reduce minimum over the requested axes.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 2.0, 3.0, 4.0]));
let y = x.reduce_min(&[0, 1]).unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0]);
Source§

impl<B: TensorBackend> EagerTensor<B>

Source

pub fn abs(&self) -> Result<Self>

Elementwise absolute value.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![-1.0_f64, 2.0]));
let y = x.abs().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 2.0]);
Source

pub fn conj(&self) -> Result<Self>

Elementwise complex conjugate.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, -2.0]));
let y = x.conj().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, -2.0]);
Source

pub fn sign(&self) -> Result<Self>

Elementwise sign.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![-2.0_f64, 3.0]));
let y = x.sign().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[-1.0, 1.0]);
Source

pub fn log(&self) -> Result<Self>

Elementwise natural logarithm.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![1.0_f64]));
let y = x.log().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.0]);
Source

pub fn sqrt(&self) -> Result<Self>

Elementwise square root.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![4.0_f64]));
let y = x.sqrt().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[2.0]);
Source

pub fn rsqrt(&self) -> Result<Self>

Elementwise reciprocal square root.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![4.0_f64]));
let y = x.rsqrt().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.5]);
Source

pub fn sin(&self) -> Result<Self>

Elementwise sine.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![0.0_f64]));
let y = x.sin().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.0]);
Source

pub fn cos(&self) -> Result<Self>

Elementwise cosine.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![0.0_f64]));
let y = x.cos().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0]);
Source

pub fn tanh(&self) -> Result<Self>

Elementwise hyperbolic tangent.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![0.0_f64]));
let y = x.tanh().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.0]);
Source

pub fn expm1(&self) -> Result<Self>

Elementwise exp(x) - 1.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![0.0_f64]));
let y = x.expm1().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.0]);
Source

pub fn log1p(&self) -> Result<Self>

Elementwise log(1 + x).

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![1], vec![0.0_f64]));
let y = x.log1p().unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[0.0]);
Source

pub fn div(&self, other: &Self) -> Result<Self>

Elementwise division.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![3], vec![8.0_f64, -6.0, 9.0]));
let y = EagerTensor::from_tensor(Tensor::from_vec(vec![3], vec![2.0_f64, 3.0, 3.0]));
let z = x.div(&y).unwrap();

assert_eq!(z.data().as_slice::<f64>().unwrap(), &[4.0, -2.0, 3.0]);
Source

pub fn pow(&self, other: &Self) -> Result<Self>

Elementwise power.

§Examples
use tenferro::{EagerTensor, Tensor};

let base = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![2.0_f64, 3.0]));
let exp = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![3.0_f64, 2.0]));
let y = base.pow(&exp).unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[8.0, 9.0]);
Source

pub fn maximum(&self, other: &Self) -> Result<Self>

Elementwise maximum.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 5.0]));
let y = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![3.0_f64, 4.0]));
let z = x.maximum(&y).unwrap();

assert_eq!(z.data().as_slice::<f64>().unwrap(), &[3.0, 5.0]);
Source

pub fn minimum(&self, other: &Self) -> Result<Self>

Elementwise minimum.

§Examples
use tenferro::{EagerTensor, Tensor};

let x = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 5.0]));
let y = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![3.0_f64, 4.0]));
let z = x.minimum(&y).unwrap();

assert_eq!(z.data().as_slice::<f64>().unwrap(), &[1.0, 4.0]);
Source

pub fn select(condition: &Self, on_true: &Self, on_false: &Self) -> Result<Self>

Select values from on_true or on_false using condition.

§Examples
use tenferro::{EagerTensor, Tensor};

let condition = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![0.0_f64, 1.0]));
let on_true = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![10.0_f64, 20.0]));
let on_false = EagerTensor::from_tensor(Tensor::from_vec(vec![2], vec![1.0_f64, 2.0]));
let y = EagerTensor::select(&condition, &on_true, &on_false).unwrap();

assert_eq!(y.data().as_slice::<f64>().unwrap(), &[1.0, 20.0]);
Source§

impl<B: TensorBackend> EagerTensor<B>

Source

pub fn svd(&self) -> Result<(Self, Self, Self)>

Singular value decomposition: A = U diag(S) Vh.

§Examples
use tenferro::{EagerTensor, Tensor};

let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 0.0, 0.0, 2.0]));
let (u, s, vh) = a.svd().unwrap();

assert_eq!(u.data().shape(), &[2, 2]);
assert_eq!(s.data().shape(), &[2]);
assert_eq!(vh.data().shape(), &[2, 2]);
Source

pub fn qr(&self) -> Result<(Self, Self)>

QR decomposition: A = Q R.

§Examples
use tenferro::{EagerTensor, Tensor};

let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 0.0, 0.0, 1.0]));
let (q, r) = a.qr().unwrap();

assert_eq!(q.data().shape(), &[2, 2]);
assert_eq!(r.data().shape(), &[2, 2]);
Source

pub fn lu(&self) -> Result<(Self, Self, Self, Self)>

LU decomposition with partial pivoting: P A = L U.

§Examples
use tenferro::{EagerTensor, Tensor};

let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![0.0_f64, 1.0, 1.0, 0.0]));
let (p, l, u, parity) = a.lu().unwrap();

assert_eq!(p.data().shape(), &[2, 2]);
assert_eq!(l.data().shape(), &[2, 2]);
assert_eq!(u.data().shape(), &[2, 2]);
assert_eq!(parity.data().shape(), &[] as &[usize]);
Source

pub fn cholesky(&self) -> Result<Self>

Cholesky factorization: A = L L^T for real inputs.

§Examples
use tenferro::{EagerTensor, Tensor};

let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 0.0, 0.0, 1.0]));
let l = a.cholesky().unwrap();

assert_eq!(l.data().shape(), &[2, 2]);
assert_eq!(l.data().as_slice::<f64>().unwrap(), &[1.0, 0.0, 0.0, 1.0]);
Source

pub fn eigh(&self) -> Result<(Self, Self)>

Symmetric or Hermitian eigendecomposition: A = V diag(W) V^T.

§Examples
use tenferro::{EagerTensor, Tensor};

let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 0.0, 0.0, 3.0]));
let (values, vectors) = a.eigh().unwrap();

assert_eq!(values.data().shape(), &[2]);
assert_eq!(vectors.data().shape(), &[2, 2]);
Source

pub fn eig(&self) -> Result<(Self, Self)>

General eigendecomposition.

§Examples
use tenferro::{EagerTensor, Tensor};

let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![1.0_f64, 0.0, 0.0, 3.0]));
let (values, vectors) = a.eig().unwrap();

assert_eq!(values.data().shape(), &[2]);
assert_eq!(vectors.data().shape(), &[2, 2]);
Source

pub fn triangular_solve( &self, b: &Self, left_side: bool, lower: bool, transpose_a: bool, unit_diagonal: bool, ) -> Result<Self>

Solve a triangular linear system.

§Examples
use tenferro::{EagerTensor, Tensor};

let a = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 2], vec![2.0_f64, 0.0, 0.0, 4.0]));
let b = EagerTensor::from_tensor(Tensor::from_vec(vec![2, 1], vec![4.0_f64, 8.0]));
let x = a
    .triangular_solve(&b, true, true, false, false)
    .unwrap();

assert_eq!(x.data().shape(), &[2, 1]);
assert_eq!(x.data().as_slice::<f64>().unwrap(), &[2.0, 2.0]);

Trait Implementations§

Source§

impl<B: TensorBackend> Add for &EagerTensor<B>

Source§

type Output = EagerTensor<B>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &EagerTensor<B>) -> Self::Output

Performs the + operation. Read more
Source§

impl<B: Clone + TensorBackend> Clone for EagerTensor<B>

Source§

fn clone(&self) -> EagerTensor<B>

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl<B: TensorBackend> Mul for &EagerTensor<B>

Source§

type Output = EagerTensor<B>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &EagerTensor<B>) -> Self::Output

Performs the * operation. Read more
Source§

impl<B: TensorBackend> Neg for &EagerTensor<B>

Source§

type Output = EagerTensor<B>

The resulting type after applying the - operator.
Source§

fn neg(self) -> Self::Output

Performs the unary - operation. Read more

Auto Trait Implementations§

§

impl<B> Freeze for EagerTensor<B>

§

impl<B> RefUnwindSafe for EagerTensor<B>

§

impl<B> Send for EagerTensor<B>
where B: Send,

§

impl<B> Sync for EagerTensor<B>
where B: Send,

§

impl<B> Unpin for EagerTensor<B>

§

impl<B> UnsafeUnpin for EagerTensor<B>

§

impl<B> UnwindSafe for EagerTensor<B>

Blanket Implementations§

§

impl<Rhs, Lhs, Output> AddByRef<Rhs> for Lhs
where for<'a> &'a Lhs: Add<&'a Rhs, Output = Output>,

§

type Output = Output

§

fn add_by_ref(&self, rhs: &Rhs) -> <Lhs as AddByRef<Rhs>>::Output

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
§

impl<T> ByRef<T> for T

§

fn by_ref(&self) -> &T

Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
§

impl<T> DistributionExt for T
where T: ?Sized,

§

fn rand<T>(&self, rng: &mut (impl Rng + ?Sized)) -> T
where Self: Distribution<T>,

Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
§

impl<Rhs, Lhs, Output> MulByRef<Rhs> for Lhs
where for<'a> &'a Lhs: Mul<&'a Rhs, Output = Output>,

§

type Output = Output

§

fn mul_by_ref(&self, rhs: &Rhs) -> <Lhs as MulByRef<Rhs>>::Output

§

impl<T, Output> NegByRef for T
where for<'a> &'a T: Neg<Output = Output>,

§

type Output = Output

§

fn neg_by_ref(&self) -> <T as NegByRef>::Output

§

impl<T> Pointable for T

§

const ALIGN: usize

The alignment of pointer.
§

type Init = T

The type for initializers.
§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a value with the given initializer, returning it as a raw pointer-sized handle. Read more
§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

§

fn vzip(self) -> V

§

impl<T, U> Imply<T> for U
where T: ?Sized, U: ?Sized,

§

impl<T> MaybeSend for T

§

impl<T> MaybeSendSync for T

§

impl<T> MaybeSync for T