Skip to main content

CachedFunction

Struct CachedFunction 

Source
pub struct CachedFunction<V, F, I = usize>
where I: IndexInt, V: Clone + Send + Sync + 'static, F: Fn(&[I]) -> V + Send + Sync,
{ /* private fields */ }
Expand description

A wrapper that caches function evaluations for multi-index inputs.

Thread-safe: all methods take &self. Multiple threads can call eval concurrently.

§Type parameters

  • V - cached value type
  • F - single-evaluation function Fn(&[I]) -> V
  • I - index element type (default usize); use u8 for quantics

§Examples

use tensor4all_tcicore::CachedFunction;

// Cache a 2-site function with local dimensions [3, 4]
let cf = CachedFunction::new(
    |idx: &[usize]| (idx[0] * 4 + idx[1]) as f64,
    &[3, 4],
).unwrap();

// First call evaluates and caches
let v00 = cf.eval(&[0, 0]);
assert_eq!(v00, 0.0);
assert_eq!(cf.num_evals(), 1);
assert_eq!(cf.num_cache_hits(), 0);

// Second call uses cache
let v00_again = cf.eval(&[0, 0]);
assert_eq!(v00_again, 0.0);
assert_eq!(cf.num_cache_hits(), 1);

let v12 = cf.eval(&[1, 2]);
assert_eq!(v12, 6.0); // 1*4 + 2

Implementations§

Source§

impl<V, F, I> CachedFunction<V, F, I>
where I: IndexInt, V: Clone + Send + Sync + 'static, F: Fn(&[I]) -> V + Send + Sync,

Source

pub fn new(func: F, local_dims: &[usize]) -> Result<Self, CacheKeyError>

Create a new cached function with automatic key selection (up to 1024 bits).

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| idx[0] + idx[1], &[2, 3]).unwrap();
assert_eq!(cf.eval(&[1, 2]), 3);
assert_eq!(cf.num_sites(), 2);
assert_eq!(cf.local_dims(), &[2, 3]);
Source

pub fn with_batch<B>( func: F, batch_func: B, local_dims: &[usize], ) -> Result<Self, CacheKeyError>
where B: Fn(&[Vec<I>]) -> Vec<V> + Send + Sync + 'static,

Create with a batch function for efficient multi-point evaluation.

The batch function is used for cache misses during eval_batch calls, enabling amortized cost when evaluating many indices at once (e.g., batch FFI calls or vectorized computations).

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::with_batch(
    |idx: &[usize]| idx[0] * 10 + idx[1],
    |indices: &[Vec<usize>]| indices.iter().map(|idx| idx[0] * 10 + idx[1]).collect(),
    &[3, 4],
).unwrap();

let results = cf.eval_batch(&[vec![0, 1], vec![2, 3]]);
assert_eq!(results, vec![1, 23]);
assert_eq!(cf.num_evals(), 2);
Source

pub fn with_key_type<K: CacheKey>( func: F, local_dims: &[usize], ) -> Result<Self, CacheKeyError>

Create with an explicit key type for index spaces larger than 1024 bits.

§Example
use bnum::types::U2048;
use tensor4all_tcicore::{CacheKey, CachedFunction};

#[derive(Clone, Hash, PartialEq, Eq)]
struct U2048Key(U2048);

impl CacheKey for U2048Key {
    const BITS_COUNT: u32 = 2048;
    const ZERO: Self = Self(U2048::ZERO);
    const ONE: Self = Self(U2048::ONE);

    fn from_usize(v: usize) -> Self {
        Self(U2048::from(v as u64))
    }

    fn checked_mul(self, rhs: Self) -> Option<Self> {
        self.0.checked_mul(rhs.0).map(Self)
    }

    fn wrapping_add(self, rhs: Self) -> Self {
        Self(self.0.wrapping_add(rhs.0))
    }
}

let local_dims = vec![2usize; 1025];
let cf = CachedFunction::with_key_type::<U2048Key>(
    |idx: &[usize]| idx.iter().sum::<usize>(),
    &local_dims,
).unwrap();
let zeros = vec![0usize; 1025];

assert_eq!(cf.eval(&zeros), 0);
assert_eq!(cf.key_type(), "custom");
Source

pub fn with_key_type_and_batch<K: CacheKey, B>( func: F, batch_func: B, local_dims: &[usize], ) -> Result<Self, CacheKeyError>
where B: Fn(&[Vec<I>]) -> Vec<V> + Send + Sync + 'static,

Create with explicit key type and batch function.

Combines with_key_type and with_batch for index spaces larger than 1024 bits that also benefit from batch evaluation.

§Examples
use tensor4all_tcicore::CachedFunction;

// Use u128 key type with batch support
let cf = CachedFunction::with_key_type_and_batch::<u128, _>(
    |idx: &[usize]| idx.iter().sum::<usize>(),
    |indices: &[Vec<usize>]| indices.iter().map(|idx| idx.iter().sum()).collect(),
    &[2, 3, 4],
).unwrap();

let results = cf.eval_batch(&[vec![0, 0, 0], vec![1, 2, 3]]);
assert_eq!(results, vec![0, 6]);
Source

pub fn eval(&self, idx: &[I]) -> V

Evaluate at a given index, using cache if available.

On the first call for a given index, the wrapped function is invoked and the result is cached. Subsequent calls with the same index return the cached value. This method is thread-safe.

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| idx[0] * idx[1], &[5, 5]).unwrap();
assert_eq!(cf.eval(&[3, 4]), 12);
assert_eq!(cf.num_evals(), 1);

// Cache hit
assert_eq!(cf.eval(&[3, 4]), 12);
assert_eq!(cf.num_evals(), 1);
assert_eq!(cf.num_cache_hits(), 1);
Source

pub fn eval_no_cache(&self, idx: &[I]) -> V

Evaluate bypassing the cache.

The result is neither read from nor stored in the cache, and evaluation counters are not updated. Useful for verification or when the caller intentionally wants a fresh evaluation.

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| idx[0] + 1, &[4]).unwrap();
assert_eq!(cf.eval_no_cache(&[2]), 3);
assert_eq!(cf.cache_size(), 0);
assert_eq!(cf.num_evals(), 0);
Source

pub fn eval_batch(&self, indices: &[Vec<I>]) -> Vec<V>

Evaluate at multiple indices. Uses batch function for cache misses if available.

Returns results in the same order as the input indices.

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| idx[0] * 2 + idx[1], &[2, 2]).unwrap();
let results = cf.eval_batch(&[vec![0, 0], vec![0, 1], vec![1, 0]]);
assert_eq!(results, vec![0, 1, 2]);
Source

pub fn local_dims(&self) -> &[usize]

Get the local dimensions.

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| 0, &[3, 4, 5]).unwrap();
assert_eq!(cf.local_dims(), &[3, 4, 5]);
Source

pub fn num_sites(&self) -> usize

Get the number of sites (length of the multi-index).

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| 0, &[2, 3]).unwrap();
assert_eq!(cf.num_sites(), 2);
Source

pub fn num_evals(&self) -> usize

Get the number of function evaluations (cache misses).

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
cf.eval(&[0]);
cf.eval(&[1]);
cf.eval(&[0]); // cache hit, not a new eval
assert_eq!(cf.num_evals(), 2);
Source

pub fn num_cache_hits(&self) -> usize

Get the number of cache hits.

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
cf.eval(&[0]);
assert_eq!(cf.num_cache_hits(), 0);
cf.eval(&[0]);
assert_eq!(cf.num_cache_hits(), 1);
Source

pub fn total_calls(&self) -> usize

Get total calls (evaluations + cache hits).

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
cf.eval(&[0]);
cf.eval(&[1]);
cf.eval(&[0]); // cache hit
assert_eq!(cf.total_calls(), 3);
assert_eq!(cf.total_calls(), cf.num_evals() + cf.num_cache_hits());
Source

pub fn cache_hit_ratio(&self) -> f64

Get cache hit ratio (0.0 when no calls have been made).

Returns num_cache_hits() / total_calls() as a value in [0.0, 1.0].

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
assert_eq!(cf.cache_hit_ratio(), 0.0); // no calls yet

cf.eval(&[0]);
cf.eval(&[0]); // cache hit
assert!((cf.cache_hit_ratio() - 0.5).abs() < 1e-10);
Source

pub fn clear_cache(&self)

Clear the cache.

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
cf.eval(&[2]);
assert_eq!(cf.cache_size(), 1);
cf.clear_cache();
assert_eq!(cf.cache_size(), 0);
Source

pub fn cache_size(&self) -> usize

Number of cached entries.

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
assert_eq!(cf.cache_size(), 0);
cf.eval(&[0]);
cf.eval(&[1]);
assert_eq!(cf.cache_size(), 2);
cf.eval(&[0]); // cache hit, no new entry
assert_eq!(cf.cache_size(), 2);
Source

pub fn is_cached(&self, idx: &[I]) -> bool

Check if an index is cached.

§Examples
use tensor4all_tcicore::CachedFunction;

let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
assert!(!cf.is_cached(&[1]));
cf.eval(&[1]);
assert!(cf.is_cached(&[1]));
Source

pub fn key_type(&self) -> &'static str

Internal key type name (for debugging).

Returns "u64", "u128", "U256", "U512", "U1024" for automatically selected types, or "custom" when constructed with with_key_type.

§Examples
use tensor4all_tcicore::CachedFunction;

// Small index space uses u64
let cf = CachedFunction::new(|idx: &[usize]| 0, &[2, 3]).unwrap();
assert_eq!(cf.key_type(), "u64");

Auto Trait Implementations§

§

impl<V, F, I = usize> !Freeze for CachedFunction<V, F, I>

§

impl<V, F, I = usize> !RefUnwindSafe for CachedFunction<V, F, I>

§

impl<V, F, I> Send for CachedFunction<V, F, I>

§

impl<V, F, I> Sync for CachedFunction<V, F, I>

§

impl<V, F, I> Unpin for CachedFunction<V, F, I>
where F: Unpin, I: Unpin, V: Unpin,

§

impl<V, F, I> UnsafeUnpin for CachedFunction<V, F, I>
where F: UnsafeUnpin,

§

impl<V, F, I = usize> !UnwindSafe for CachedFunction<V, F, I>

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
§

impl<U> As for U

§

fn as_<T>(self) -> T
where T: CastFrom<U>,

Casts self to type T. The semantics of numeric casting with the as operator are followed, so <T as As>::as_::<U> can be used in the same way as T as U for numeric conversions. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
§

impl<T> ByRef<T> for T

§

fn by_ref(&self) -> &T

§

impl<T> ByRef<T> for T

§

fn by_ref(&self) -> &T

§

impl<T> DistributionExt for T
where T: ?Sized,

§

fn rand<T>(&self, rng: &mut (impl Rng + ?Sized)) -> T
where Self: Distribution<T>,

§

impl<T> DistributionExt for T
where T: ?Sized,

§

fn rand<T>(&self, rng: &mut (impl Rng + ?Sized)) -> T
where Self: Distribution<T>,

Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
§

impl<T> Pointable for T

§

const ALIGN: usize

The alignment of pointer.
§

type Init = T

The type for initializers.
§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointer with the given initializer. Read more
§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

§

fn vzip(self) -> V

§

impl<T, U> Imply<T> for U
where T: ?Sized, U: ?Sized,

§

impl<T> MaybeSend for T

§

impl<T> MaybeSendSync for T

§

impl<T> MaybeSync for T