// tenferro_tensor/tensor/transfer.rs

1use tenferro_algebra::Scalar;
2use tenferro_device::{LogicalMemorySpace, Result};
3
4use super::Tensor;
5
6impl<T: Scalar> Tensor<T> {
7    /// Asynchronously transfer this tensor to a different memory space.
8    ///
9    /// # Examples
10    ///
11    /// ```ignore
12    /// let t = Tensor::<f64>::zeros(&[2, 3], LogicalMemorySpace::MainMemory, MemoryOrder::ColumnMajor).unwrap();
13    /// let t2 = t.to_memory_space_async(LogicalMemorySpace::MainMemory).unwrap();
14    /// assert_eq!(t2.dims(), &[2, 3]);
15    /// ```
16    pub fn to_memory_space_async(&self, target: LogicalMemorySpace) -> Result<Tensor<T>> {
17        if target == self.logical_memory_space {
18            return Ok(self.clone());
19        }
20
21        #[cfg(feature = "cuda")]
22        {
23            return crate::cuda_runtime::transfer_tensor(self, target);
24        }
25
26        #[cfg(not(feature = "cuda"))]
27        {
28            Err(tenferro_device::Error::DeviceError(
29                "GPU memory transfer not available: rebuild with `tenferro-tensor --features cuda`"
30                    .into(),
31            ))
32        }
33    }
34}
35
impl<T> Tensor<T> {
    /// Wait for any pending GPU computation to complete.
    ///
    /// This implementation is a no-op: it touches no state and returns
    /// immediately. NOTE(review): `is_ready` below inspects `self.event`,
    /// but `wait` never drains or awaits it — presumably actual
    /// synchronization lives in a feature-gated or device-side path not
    /// visible in this file; confirm.
    ///
    /// # Examples
    ///
    /// ```ignore
    /// let t = Tensor::<f64>::zeros(&[2, 3], LogicalMemorySpace::MainMemory, MemoryOrder::ColumnMajor).unwrap();
    /// t.wait();
    /// ```
    pub fn wait(&self) {
        // Explicitly "use" self so the empty body reads as intentional.
        let _ = self;
    }

    /// Check if tensor data is ready without blocking.
    ///
    /// Returns `true` when no completion event is attached to the tensor
    /// (`self.event` is `None`); an attached event indicates an
    /// in-flight operation whose result may not yet be visible.
    ///
    /// # Examples
    ///
    /// ```ignore
    /// let t = Tensor::<f64>::zeros(&[2, 3], LogicalMemorySpace::MainMemory, MemoryOrder::ColumnMajor).unwrap();
    /// assert!(t.is_ready());
    /// ```
    pub fn is_ready(&self) -> bool {
        self.event.is_none()
    }
}