pub struct CudaBackend { /* private fields */ }
Expand description
CUDA backend (stub) — placeholder when cuda feature is not enabled.
Status: Stub. All methods return errors. Enable the cuda feature
for the real implementation backed by cuTENSOR + cudarc.
§Examples
ⓘ
// Aspirational API — enable `cuda` feature for real backend.
use tenferro_prims::{CudaBackend, BackendRegistry};
let mut registry = BackendRegistry::new();
registry.load_cutensor("/usr/lib/libcutensor.so").unwrap();
Implementations§
Source§impl CudaBackend
impl CudaBackend
Sourcepub fn resolve_conj<T: Scalar + Conjugate>(
_ctx: &mut CudaContext,
src: &Tensor<T>,
) -> Tensor<T>
pub fn resolve_conj<T: Scalar + Conjugate>( _ctx: &mut CudaContext, src: &Tensor<T>, ) -> Tensor<T>
Materialize a lazily-conjugated tensor on GPU.
Status: Stub fallback. If data is CPU-accessible, this materializes
conjugation into a new non-conjugated tensor. Otherwise it returns a
clone of src.
Trait Implementations§
Source§impl Drop for CudaBackend
Available on non-crate feature cuda only.
impl Drop for CudaBackend
Available on non-crate feature cuda only.
Source§impl<S: Scalar> TensorAnalyticPrims<Standard<S>> for CudaBackend
Available on non-crate feature cuda only.
impl<S: Scalar> TensorAnalyticPrims<Standard<S>> for CudaBackend
Available on non-crate feature cuda only.
type Plan = ()
type Context = CudaContext
Source§fn plan(
_ctx: &mut Self::Context,
desc: &AnalyticPrimsDescriptor,
_shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &AnalyticPrimsDescriptor, _shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan an analytic-family operation for the given input/output shapes. Read more
Source§fn execute(
_ctx: &mut Self::Context,
_plan: &Self::Plan,
_alpha: S,
_inputs: &[&Tensor<S>],
_beta: S,
_output: &mut Tensor<S>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, _plan: &Self::Plan, _alpha: S, _inputs: &[&Tensor<S>], _beta: S, _output: &mut Tensor<S>, ) -> Result<()>
Execute a previously planned analytic-family operation. Read more
Source§fn has_analytic_support(_desc: AnalyticPrimsDescriptor) -> bool
fn has_analytic_support(_desc: AnalyticPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor. Read more
Source§impl<Input> TensorComplexRealPrims<Input> for CudaBackend
Available on non-crate feature cuda only.
impl<Input> TensorComplexRealPrims<Input> for CudaBackend
Available on non-crate feature cuda only.
Source§type Real = <Input as ComplexFloat>::Real
type Real = <Input as ComplexFloat>::Real
The real-valued output scalar type.
Source§type Context = CudaContext
type Context = CudaContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &ComplexRealPrimsDescriptor,
_shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &ComplexRealPrimsDescriptor, _shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a complex-to-real unary operation for the given input/output shapes.
Source§fn execute(
_ctx: &mut Self::Context,
_plan: &Self::Plan,
_alpha: Input::Real,
_inputs: &[&Tensor<Input>],
_beta: Input::Real,
_output: &mut Tensor<Self::Real>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, _plan: &Self::Plan, _alpha: Input::Real, _inputs: &[&Tensor<Input>], _beta: Input::Real, _output: &mut Tensor<Self::Real>, ) -> Result<()>
Execute a previously planned complex-to-real unary operation. Read more
Source§fn has_complex_real_support(_desc: ComplexRealPrimsDescriptor) -> bool
fn has_complex_real_support(_desc: ComplexRealPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl<Input> TensorComplexScalePrims<Input> for CudaBackend
Available on non-crate feature cuda only.
impl<Input> TensorComplexScalePrims<Input> for CudaBackend
Available on non-crate feature cuda only.
Source§type Context = CudaContext
type Context = CudaContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &ComplexScalePrimsDescriptor,
_shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &ComplexScalePrimsDescriptor, _shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a complex-by-real pointwise operation for the given shapes.
Source§fn execute(
_ctx: &mut Self::Context,
_plan: &Self::Plan,
_alpha: Input,
_lhs: &Tensor<Input>,
_rhs: &Tensor<Input::Real>,
_beta: Input,
_output: &mut Tensor<Input>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, _plan: &Self::Plan, _alpha: Input, _lhs: &Tensor<Input>, _rhs: &Tensor<Input::Real>, _beta: Input, _output: &mut Tensor<Input>, ) -> Result<()>
Execute a previously planned complex-by-real pointwise operation.
Source§fn has_complex_scale_support(_desc: ComplexScalePrimsDescriptor) -> bool
fn has_complex_scale_support(_desc: ComplexScalePrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl<S: Scalar> TensorIndexingPrims<Standard<S>> for CudaBackend
Available on non-crate feature cuda only.
impl<S: Scalar> TensorIndexingPrims<Standard<S>> for CudaBackend
Available on non-crate feature cuda only.
Source§type Context = CudaContext
type Context = CudaContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &IndexingPrimsDescriptor,
_shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &IndexingPrimsDescriptor, _shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan an indexing operation for the given input/index/output shapes. Read more
Source§fn execute(
_ctx: &mut Self::Context,
_plan: &Self::Plan,
_inputs: &[&Tensor<S>],
_indices: &Tensor<i64>,
_output: &mut Tensor<S>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, _plan: &Self::Plan, _inputs: &[&Tensor<S>], _indices: &Tensor<i64>, _output: &mut Tensor<S>, ) -> Result<()>
Execute a previously planned indexing operation. Read more
Source§fn has_indexing_support(_desc: IndexingPrimsDescriptor) -> bool
fn has_indexing_support(_desc: IndexingPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl<S: Scalar + NumCast> TensorMetadataCastPrims<S> for CudaBackend
Available on non-crate feature cuda only.
impl<S: Scalar + NumCast> TensorMetadataCastPrims<S> for CudaBackend
Available on non-crate feature cuda only.
Source§type Plan = MetadataCastPrimsDescriptor
type Plan = MetadataCastPrimsDescriptor
Backend plan type.
Source§type Context = CudaContext
type Context = CudaContext
Backend execution context.
Source§fn plan(
_ctx: &mut CudaContext,
desc: &MetadataCastPrimsDescriptor,
_shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut CudaContext, desc: &MetadataCastPrimsDescriptor, _shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a metadata-to-scalar bridge operation.
Source§fn execute(
_ctx: &mut CudaContext,
_plan: &Self::Plan,
_alpha: S,
_inputs: &[MetadataScalarTensorRef<'_, S>],
_beta: S,
_output: &mut Tensor<S>,
) -> Result<()>
fn execute( _ctx: &mut CudaContext, _plan: &Self::Plan, _alpha: S, _inputs: &[MetadataScalarTensorRef<'_, S>], _beta: S, _output: &mut Tensor<S>, ) -> Result<()>
Execute a previously planned metadata-to-scalar bridge operation. Read more
Source§fn has_metadata_cast_support(_desc: MetadataCastPrimsDescriptor) -> bool
fn has_metadata_cast_support(_desc: MetadataCastPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl TensorMetadataPrims for CudaBackend
Available on non-crate feature cuda only.
impl TensorMetadataPrims for CudaBackend
Available on non-crate feature cuda only.
Source§type Plan = MetadataPrimsDescriptor
type Plan = MetadataPrimsDescriptor
Backend plan type.
Source§type Context = CudaContext
type Context = CudaContext
Backend execution context.
Source§fn plan(
_ctx: &mut CudaContext,
desc: &MetadataPrimsDescriptor,
_inputs: &[MetadataTensorRef<'_>],
_output: MetadataTensorMut<'_>,
) -> Result<Self::Plan>
fn plan( _ctx: &mut CudaContext, desc: &MetadataPrimsDescriptor, _inputs: &[MetadataTensorRef<'_>], _output: MetadataTensorMut<'_>, ) -> Result<Self::Plan>
Plan a metadata-family operation for the given input and output tensor
handles.
Source§fn execute(
_ctx: &mut CudaContext,
_plan: &Self::Plan,
_inputs: &[MetadataTensorRef<'_>],
_output: MetadataTensorMut<'_>,
) -> Result<()>
fn execute( _ctx: &mut CudaContext, _plan: &Self::Plan, _inputs: &[MetadataTensorRef<'_>], _output: MetadataTensorMut<'_>, ) -> Result<()>
Execute a previously planned metadata-family operation in overwrite
mode.
Source§fn has_metadata_support(_desc: MetadataPrimsDescriptor) -> bool
fn has_metadata_support(_desc: MetadataPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl TensorRngPrims<Standard<f64>> for CudaBackend
Available on non-crate feature cuda only.
impl TensorRngPrims<Standard<f64>> for CudaBackend
Available on non-crate feature cuda only.
Source§type Context = CudaContext
type Context = CudaContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &RngPrimsDescriptor,
_shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &RngPrimsDescriptor, _shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a tensor RNG operation for the given output shape.
Source§fn execute(
_ctx: &mut Self::Context,
_plan: &Self::Plan,
_generator: &mut Generator,
_output: &mut Tensor<f64>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, _plan: &Self::Plan, _generator: &mut Generator, _output: &mut Tensor<f64>, ) -> Result<()>
Execute a previously planned RNG operation.
Source§fn has_rng_support(_desc: RngPrimsDescriptor) -> bool
fn has_rng_support(_desc: RngPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl TensorRngPrims<Standard<i32>> for CudaBackend
Available on non-crate feature cuda only.
impl TensorRngPrims<Standard<i32>> for CudaBackend
Available on non-crate feature cuda only.
Source§type Context = CudaContext
type Context = CudaContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &RngPrimsDescriptor,
_shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &RngPrimsDescriptor, _shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a tensor RNG operation for the given output shape.
Source§fn execute(
_ctx: &mut Self::Context,
_plan: &Self::Plan,
_generator: &mut Generator,
_output: &mut Tensor<i32>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, _plan: &Self::Plan, _generator: &mut Generator, _output: &mut Tensor<i32>, ) -> Result<()>
Execute a previously planned RNG operation.
Source§fn has_rng_support(_desc: RngPrimsDescriptor) -> bool
fn has_rng_support(_desc: RngPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
Source§impl<S: Scalar> TensorScalarPrims<Standard<S>> for CudaBackend
Available on non-crate feature cuda only.
impl<S: Scalar> TensorScalarPrims<Standard<S>> for CudaBackend
Available on non-crate feature cuda only.
type Plan = ()
type Context = CudaContext
Source§fn plan(
_ctx: &mut Self::Context,
desc: &ScalarPrimsDescriptor,
_shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &ScalarPrimsDescriptor, _shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a scalar-family operation for the given input/output shapes. Read more
Source§fn execute(
_ctx: &mut Self::Context,
_plan: &Self::Plan,
_alpha: S,
_inputs: &[&Tensor<S>],
_beta: S,
_output: &mut Tensor<S>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, _plan: &Self::Plan, _alpha: S, _inputs: &[&Tensor<S>], _beta: S, _output: &mut Tensor<S>, ) -> Result<()>
Execute a previously planned scalar-family operation. Read more
Source§fn has_scalar_support(_desc: ScalarPrimsDescriptor) -> bool
fn has_scalar_support(_desc: ScalarPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor. Read more
Source§impl<S: Scalar> TensorSemiringCore<Standard<S>> for CudaBackend
Available on non-crate feature cuda only.
impl<S: Scalar> TensorSemiringCore<Standard<S>> for CudaBackend
Available on non-crate feature cuda only.
Source§type Context = CudaContext
type Context = CudaContext
Backend-specific execution context.
Source§fn plan(
_ctx: &mut CudaContext,
_desc: &SemiringCoreDescriptor,
_shapes: &[&[usize]],
) -> Result<CudaPlan<S>>
fn plan( _ctx: &mut CudaContext, _desc: &SemiringCoreDescriptor, _shapes: &[&[usize]], ) -> Result<CudaPlan<S>>
Plan a semiring-core operation.
Source§impl<S: Scalar> TensorSemiringFastPath<Standard<S>> for CudaBackend
Available on non-crate feature cuda only.
impl<S: Scalar> TensorSemiringFastPath<Standard<S>> for CudaBackend
Available on non-crate feature cuda only.
Source§type Context = CudaContext
type Context = CudaContext
Backend-specific execution context.
Source§fn plan(
_ctx: &mut CudaContext,
_desc: &SemiringFastPathDescriptor,
_shapes: &[&[usize]],
) -> Result<CudaPlan<S>>
fn plan( _ctx: &mut CudaContext, _desc: &SemiringFastPathDescriptor, _shapes: &[&[usize]], ) -> Result<CudaPlan<S>>
Plan an optional semiring fast path.
Source§fn execute(
_ctx: &mut CudaContext,
_plan: &CudaPlan<S>,
_alpha: S,
_inputs: &[&Tensor<S>],
_beta: S,
_output: &mut Tensor<S>,
) -> Result<()>
fn execute( _ctx: &mut CudaContext, _plan: &CudaPlan<S>, _alpha: S, _inputs: &[&Tensor<S>], _beta: S, _output: &mut Tensor<S>, ) -> Result<()>
Execute an optional semiring fast path.
Source§fn has_fast_path(_desc: SemiringFastPathDescriptor) -> bool
fn has_fast_path(_desc: SemiringFastPathDescriptor) -> bool
Query whether the optional path is available.
Source§impl<S: Scalar + PartialOrd> TensorSortPrims<Standard<S>> for CudaBackend
Available on non-crate feature cuda only.
impl<S: Scalar + PartialOrd> TensorSortPrims<Standard<S>> for CudaBackend
Available on non-crate feature cuda only.
Source§type Context = CudaContext
type Context = CudaContext
Backend execution context.
Source§fn plan(
_ctx: &mut Self::Context,
desc: &SortPrimsDescriptor,
_shapes: &[&[usize]],
) -> Result<Self::Plan>
fn plan( _ctx: &mut Self::Context, desc: &SortPrimsDescriptor, _shapes: &[&[usize]], ) -> Result<Self::Plan>
Plan a sort operation for the given input shape. Read more
Source§fn execute(
_ctx: &mut Self::Context,
_plan: &Self::Plan,
_input: &Tensor<S>,
_values_out: &mut Tensor<S>,
_indices_out: &mut Tensor<i64>,
) -> Result<()>
fn execute( _ctx: &mut Self::Context, _plan: &Self::Plan, _input: &Tensor<S>, _values_out: &mut Tensor<S>, _indices_out: &mut Tensor<i64>, ) -> Result<()>
Execute a previously planned sort operation. Read more
Source§fn has_sort_support(_desc: &SortPrimsDescriptor) -> bool
fn has_sort_support(_desc: &SortPrimsDescriptor) -> bool
Report whether the backend advertises support for the given descriptor.
impl Send for CudaBackend
Available on non-crate feature cuda only.
§Safety
CudaBackend can be safely sent across threads because:
- The _handle is an opaque pointer to a cuTENSOR handle
- The _lib (libloading::Library) is thread-safe after loading
- The handle is read-only after construction
- Drop clears the handle before the library is unloaded, preventing use-after-free
impl Sync for CudaBackend
Available on non-crate feature cuda only.
§Safety
CudaBackend can be safely shared across threads because:
- The cuTENSOR handle is designed for concurrent use from multiple threads
- The library handle (_lib) is read-only after construction
- Symbol lookup via dlsym is thread-safe on POSIX systems
- Drop uses &mut self, ensuring exclusive access during cleanup
Auto Trait Implementations§
impl Freeze for CudaBackend
impl RefUnwindSafe for CudaBackend
impl Unpin for CudaBackend
impl UnwindSafe for CudaBackend
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
§impl<T> DistributionExt for Twhere
T: ?Sized,
impl<T> DistributionExt for Twhere
T: ?Sized,
fn rand<T>(&self, rng: &mut (impl Rng + ?Sized)) -> Twhere
Self: Distribution<T>,
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more