pub struct CpuBackend;

Expand description
CPU backend using strided kernels and GEMM.
Dispatched automatically when tensors reside on
LogicalMemorySpace::MainMemory.
Implements the semiring core and semiring fast-path families for
Standard<T>.
§Examples
ⓘ
use tenferro_algebra::Standard;
use tenferro_device::LogicalMemorySpace;
use tenferro_prims::{CpuBackend, CpuContext, SemiringCoreDescriptor, TensorSemiringCore};
use tenferro_tensor::{MemoryOrder, Tensor};
let mut ctx = CpuContext::try_new(4).unwrap();
let col = MemoryOrder::ColumnMajor;
let mem = LogicalMemorySpace::MainMemory;
let a_base = Tensor::<f64>::zeros(&[3, 4], mem, col).unwrap();
let a = a_base.permute(&[1, 0]).unwrap();
let mut b = Tensor::<f64>::zeros(&[4, 3], mem, col).unwrap();
let plan = <CpuBackend as TensorSemiringCore<Standard<f64>>>::plan(
&mut ctx,
&SemiringCoreDescriptor::MakeContiguous,
&[&[4, 3], &[4, 3]],
)
.unwrap();
<CpuBackend as TensorSemiringCore<Standard<f64>>>::execute(
&mut ctx,
&plan,
1.0,
&[&a],
0.0,
&mut b,
)
.unwrap();

Implementations§
Source§impl CpuBackend
impl CpuBackend
Sourcepub fn resolve_conj<T: Scalar + Conjugate>(
_ctx: &mut CpuContext,
src: &Tensor<T>,
) -> Tensor<T>
pub fn resolve_conj<T: Scalar + Conjugate>( _ctx: &mut CpuContext, src: &Tensor<T>, ) -> Tensor<T>
Materialize a lazily-conjugated tensor.
If src.is_conjugated() is false, returns a shallow clone.
If true, routes through the tensor-layer logical combine substrate so
the result is resolved (conjugated = false) without reimplementing the
copy logic here.
§Examples
ⓘ
use tenferro_device::LogicalMemorySpace;
use tenferro_prims::{CpuBackend, CpuContext};
use tenferro_tensor::{MemoryOrder, Tensor};
let mut ctx = CpuContext::try_new(1).unwrap();
let mem = LogicalMemorySpace::MainMemory;
let col = MemoryOrder::ColumnMajor;
let a = Tensor::<f64>::zeros(&[2, 2], mem, col).unwrap();
let a_conj = a.into_conj(); // lazy
let a_resolved = CpuBackend::resolve_conj(&mut ctx, &a_conj);
assert!(!a_resolved.is_conjugated());

Trait Implementations§
Source§impl<S: Scalar + 'static> TensorAnalyticPrims<Standard<S>> for CpuBackend
impl<S: Scalar + 'static> TensorAnalyticPrims<Standard<S>> for CpuBackend
type Plan = CpuAnalyticPlan
type Context = CpuContext
Source§fn plan(
_ctx: &mut Self::Context,
desc: &AnalyticPrimsDescriptor,
shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &AnalyticPrimsDescriptor, shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan an analytic-family operation for the given input/output shapes. Read more
Source§fn execute(
_ctx: &mut Self::Context,
plan: &Self::Plan,
alpha: S,
inputs: &[&Tensor<S>],
beta: S,
output: &mut Tensor<S>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, plan: &Self::Plan, alpha: S, inputs: &[&Tensor<S>], beta: S, output: &mut Tensor<S>, ) -> Result<()>
Execute a previously planned analytic-family operation. Read more
Source§fn has_analytic_support(desc: AnalyticPrimsDescriptor) -> bool
fn has_analytic_support(desc: AnalyticPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor. Read more
Source§impl<Input> TensorComplexRealPrims<Input> for CpuBackend
impl<Input> TensorComplexRealPrims<Input> for CpuBackend
Source§type Real = <Input as ComplexFloat>::Real
type Real = <Input as ComplexFloat>::Real
The real-valued output scalar type.
Source§type Context = CpuContext
type Context = CpuContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &ComplexRealPrimsDescriptor,
shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &ComplexRealPrimsDescriptor, shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a complex-to-real unary operation for the given input/output shapes.
Source§fn execute(
_ctx: &mut Self::Context,
plan: &Self::Plan,
alpha: Input::Real,
inputs: &[&Tensor<Input>],
beta: Input::Real,
output: &mut Tensor<Self::Real>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, plan: &Self::Plan, alpha: Input::Real, inputs: &[&Tensor<Input>], beta: Input::Real, output: &mut Tensor<Self::Real>, ) -> Result<()>
Execute a previously planned complex-to-real unary operation. Read more
Source§fn has_complex_real_support(desc: ComplexRealPrimsDescriptor) -> bool
fn has_complex_real_support(desc: ComplexRealPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl<Input> TensorComplexScalePrims<Input> for CpuBackend
impl<Input> TensorComplexScalePrims<Input> for CpuBackend
Source§type Context = CpuContext
type Context = CpuContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &ComplexScalePrimsDescriptor,
shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &ComplexScalePrimsDescriptor, shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a complex-by-real pointwise operation for the given shapes.
Source§fn execute(
_ctx: &mut Self::Context,
plan: &Self::Plan,
alpha: Input,
lhs: &Tensor<Input>,
rhs: &Tensor<Input::Real>,
beta: Input,
output: &mut Tensor<Input>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, plan: &Self::Plan, alpha: Input, lhs: &Tensor<Input>, rhs: &Tensor<Input::Real>, beta: Input, output: &mut Tensor<Input>, ) -> Result<()>
Execute a previously planned complex-by-real pointwise operation.
Source§fn has_complex_scale_support(desc: ComplexScalePrimsDescriptor) -> bool
fn has_complex_scale_support(desc: ComplexScalePrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl<S: Scalar + 'static> TensorIndexingPrims<Standard<S>> for CpuBackend
impl<S: Scalar + 'static> TensorIndexingPrims<Standard<S>> for CpuBackend
Source§type Context = CpuContext
type Context = CpuContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &IndexingPrimsDescriptor,
shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &IndexingPrimsDescriptor, shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan an indexing operation for the given input/index/output shapes. Read more
Source§fn execute(
_ctx: &mut Self::Context,
plan: &Self::Plan,
inputs: &[&Tensor<S>],
indices: &Tensor<i64>,
output: &mut Tensor<S>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, plan: &Self::Plan, inputs: &[&Tensor<S>], indices: &Tensor<i64>, output: &mut Tensor<S>, ) -> Result<()>
Execute a previously planned indexing operation. Read more
Source§fn has_indexing_support(_desc: IndexingPrimsDescriptor) -> bool
fn has_indexing_support(_desc: IndexingPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl<S> TensorMetadataCastPrims<S> for CpuBackend
impl<S> TensorMetadataCastPrims<S> for CpuBackend
Source§type Plan = MetadataCastPrimsDescriptor
type Plan = MetadataCastPrimsDescriptor
Backend plan type.
Source§type Context = CpuContext
type Context = CpuContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &MetadataCastPrimsDescriptor,
shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &MetadataCastPrimsDescriptor, shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a metadata-to-scalar bridge operation.
Source§fn execute(
_ctx: &mut Self::Context,
plan: &Self::Plan,
alpha: S,
inputs: &[MetadataScalarTensorRef<'_, S>],
beta: S,
output: &mut Tensor<S>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, plan: &Self::Plan, alpha: S, inputs: &[MetadataScalarTensorRef<'_, S>], beta: S, output: &mut Tensor<S>, ) -> Result<()>
Execute a previously planned metadata-to-scalar bridge operation. Read more
Source§fn has_metadata_cast_support(desc: MetadataCastPrimsDescriptor) -> bool
fn has_metadata_cast_support(desc: MetadataCastPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl TensorMetadataPrims for CpuBackend
impl TensorMetadataPrims for CpuBackend
Source§type Plan = MetadataPrimsDescriptor
type Plan = MetadataPrimsDescriptor
Backend plan type.
Source§type Context = CpuContext
type Context = CpuContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &MetadataPrimsDescriptor,
inputs: &[MetadataTensorRef<'_>],
output: MetadataTensorMut<'_>,
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &MetadataPrimsDescriptor, inputs: &[MetadataTensorRef<'_>], output: MetadataTensorMut<'_>, ) -> Result<Self::Plan>
Plan a metadata-family operation for the given input and output tensor
handles.
Source§fn execute(
_ctx: &mut Self::Context,
plan: &Self::Plan,
inputs: &[MetadataTensorRef<'_>],
output: MetadataTensorMut<'_>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, plan: &Self::Plan, inputs: &[MetadataTensorRef<'_>], output: MetadataTensorMut<'_>, ) -> Result<()>
Execute a previously planned metadata-family operation in overwrite
mode.
Source§fn has_metadata_support(desc: MetadataPrimsDescriptor) -> bool
fn has_metadata_support(desc: MetadataPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl TensorRngPrims<Standard<f64>> for CpuBackend
impl TensorRngPrims<Standard<f64>> for CpuBackend
Source§type Context = CpuContext
type Context = CpuContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &RngPrimsDescriptor,
shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &RngPrimsDescriptor, shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a tensor RNG operation for the given output shape.
Source§fn execute(
_ctx: &mut Self::Context,
plan: &Self::Plan,
generator: &mut Generator,
output: &mut Tensor<f64>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, plan: &Self::Plan, generator: &mut Generator, output: &mut Tensor<f64>, ) -> Result<()>
Execute a previously planned RNG operation.
Source§fn has_rng_support(desc: RngPrimsDescriptor) -> bool
fn has_rng_support(desc: RngPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl TensorRngPrims<Standard<i32>> for CpuBackend
impl TensorRngPrims<Standard<i32>> for CpuBackend
Source§type Context = CpuContext
type Context = CpuContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &RngPrimsDescriptor,
shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &RngPrimsDescriptor, shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a tensor RNG operation for the given output shape.
Source§fn execute(
_ctx: &mut Self::Context,
plan: &Self::Plan,
generator: &mut Generator,
output: &mut Tensor<i32>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, plan: &Self::Plan, generator: &mut Generator, output: &mut Tensor<i32>, ) -> Result<()>
Execute a previously planned RNG operation.
Source§fn has_rng_support(desc: RngPrimsDescriptor) -> bool
fn has_rng_support(desc: RngPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl<S: Scalar + 'static> TensorScalarPrims<Standard<S>> for CpuBackend
impl<S: Scalar + 'static> TensorScalarPrims<Standard<S>> for CpuBackend
type Plan = CpuScalarPlan
type Context = CpuContext
Source§fn plan(
_ctx: &mut Self::Context,
desc: &ScalarPrimsDescriptor,
shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &ScalarPrimsDescriptor, shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a scalar-family operation for the given input/output shapes. Read more
Source§fn execute(
_ctx: &mut Self::Context,
plan: &Self::Plan,
alpha: S,
inputs: &[&Tensor<S>],
beta: S,
output: &mut Tensor<S>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, plan: &Self::Plan, alpha: S, inputs: &[&Tensor<S>], beta: S, output: &mut Tensor<S>, ) -> Result<()>
Execute a previously planned scalar-family operation. Read more
Source§fn has_scalar_support(desc: ScalarPrimsDescriptor) -> bool
fn has_scalar_support(desc: ScalarPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor. Read more
Source§impl<S: Scalar> TensorSemiringCore<Standard<S>> for CpuBackend
impl<S: Scalar> TensorSemiringCore<Standard<S>> for CpuBackend
Source§type Context = CpuContext
type Context = CpuContext
Backend-specific execution context.
Source§fn plan(
ctx: &mut CpuContext,
desc: &SemiringCoreDescriptor,
shapes: &[&[usize]],
) -> Result<CpuPlan<S>>
fn plan( ctx: &mut CpuContext, desc: &SemiringCoreDescriptor, shapes: &[&[usize]], ) -> Result<CpuPlan<S>>
Plan a semiring-core operation.
Source§impl<S: Scalar> TensorSemiringFastPath<Standard<S>> for CpuBackend
impl<S: Scalar> TensorSemiringFastPath<Standard<S>> for CpuBackend
Source§type Context = CpuContext
type Context = CpuContext
Backend-specific execution context.
Source§fn plan(
ctx: &mut CpuContext,
desc: &SemiringFastPathDescriptor,
shapes: &[&[usize]],
) -> Result<CpuPlan<S>>
fn plan( ctx: &mut CpuContext, desc: &SemiringFastPathDescriptor, shapes: &[&[usize]], ) -> Result<CpuPlan<S>>
Plan an optional semiring fast path.
Source§fn execute(
ctx: &mut CpuContext,
plan: &CpuPlan<S>,
alpha: S,
inputs: &[&Tensor<S>],
beta: S,
output: &mut Tensor<S>,
) -> Result<()>
fn execute( ctx: &mut CpuContext, plan: &CpuPlan<S>, alpha: S, inputs: &[&Tensor<S>], beta: S, output: &mut Tensor<S>, ) -> Result<()>
Execute an optional semiring fast path.
Source§fn has_fast_path(desc: SemiringFastPathDescriptor) -> bool
fn has_fast_path(desc: SemiringFastPathDescriptor) -> bool
Query whether the optional path is available.
Source§impl<S: Scalar + PartialOrd + 'static> TensorSortPrims<Standard<S>> for CpuBackend
impl<S: Scalar + PartialOrd + 'static> TensorSortPrims<Standard<S>> for CpuBackend
Source§type Context = CpuContext
type Context = CpuContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &SortPrimsDescriptor,
shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &SortPrimsDescriptor, shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a sort operation for the given input shape. Read more
Source§fn execute(
_ctx: &mut Self::Context,
plan: &Self::Plan,
input: &Tensor<S>,
values_out: &mut Tensor<S>,
indices_out: &mut Tensor<i64>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, plan: &Self::Plan, input: &Tensor<S>, values_out: &mut Tensor<S>, indices_out: &mut Tensor<i64>, ) -> Result<()>
Execute a previously planned sort operation. Read more
Source§fn has_sort_support(_desc: &SortPrimsDescriptor) -> bool
fn has_sort_support(_desc: &SortPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Auto Trait Implementations§
impl Freeze for CpuBackend
impl RefUnwindSafe for CpuBackend
impl Send for CpuBackend
impl Sync for CpuBackend
impl Unpin for CpuBackend
impl UnwindSafe for CpuBackend
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
§impl<T> DistributionExt for T
where
    T: ?Sized,
impl<T> DistributionExt for T
where
    T: ?Sized,
fn rand<T>(&self, rng: &mut (impl Rng + ?Sized)) -> T
where
    Self: Distribution<T>,
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts
self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more

Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more