tenferro_tensor/cpu/
mod.rs1pub mod affinity;
2pub mod analytic;
3pub mod backend;
4pub mod context;
5pub mod elementwise;
6mod exec_session;
7pub mod gemm;
8pub mod indexing;
9pub mod linalg;
10pub mod reduction;
11pub mod structural;
12
13use strided_kernel::{col_major_strides, StridedArray, StridedView};
14
15use crate::{Buffer, TypedTensor};
16
17pub use affinity::{available_parallelism, process_cpu_affinity_count};
18pub use backend::CpuBackend;
19pub use context::CpuContext;
20pub use elementwise::{
21 abs, add, clamp, compare, conj, div, maximum, minimum, mul, neg, select, sign,
22};
23pub use indexing::{dynamic_slice, gather, pad, scatter};
24pub use reduction::{reduce_max, reduce_min, reduce_prod, reduce_sum};
25pub use structural::{
26 broadcast_in_dim, convert, embed_diagonal, extract_diagonal, reshape, transpose, tril, triu,
27};
28
29pub(crate) fn typed_view<T: Copy>(tensor: &TypedTensor<T>) -> StridedView<'_, T> {
30 match &tensor.buffer {
31 Buffer::Host(data) => {
32 let strides = col_major_strides(&tensor.shape);
33 StridedView::new(data, &tensor.shape, &strides, 0).expect("contiguous host tensor")
34 }
35 Buffer::Backend(_) => todo!("typed_view for backend buffers"),
36 #[cfg(feature = "cubecl")]
37 Buffer::Cubecl(_) => panic!("GPU tensor (Buffer::Cubecl) passed to CPU backend. Use cubecl::download_tensor() to transfer to CPU first."),
38 }
39}
40
/// Allocates a column-major `StridedArray<T>` of the given `shape` whose
/// element storage is left uninitialized, for use as a kernel output buffer.
///
/// # Safety
///
/// The backing `Vec<T>`'s length is set without initializing its elements.
/// Reading any element (or dropping the array when `T` has a non-trivial
/// `Drop`) before it has been written is undefined behavior. The caller must
/// overwrite every element before the array is read or dropped.
#[allow(clippy::uninit_vec)] // set_len on uninitialized memory is intentional; see Safety above
pub(crate) unsafe fn typed_array_uninit<T>(shape: &[usize]) -> StridedArray<T> {
    // Total element count for a dense array of this shape.
    let total: usize = shape.iter().product();
    let strides = col_major_strides(shape);
    let mut data = Vec::with_capacity(total);
    // SAFETY: capacity is exactly `total`; elements remain uninitialized and
    // must be fully written by the caller per this function's contract.
    unsafe { data.set_len(total) };
    // Dense column-major strides over a freshly sized buffer cannot overlap,
    // so construction is expected to succeed.
    StridedArray::from_parts(data, shape, &strides, 0).expect("column-major output array")
}
55
56pub(crate) fn tensor_from_array<T: Clone>(array: StridedArray<T>) -> TypedTensor<T> {
57 TypedTensor::from_vec(array.dims().to_vec(), array.into_data())
58}