pub struct CachedFunction<V, F, I = usize> { /* private fields */ }

Expand description
A wrapper that caches function evaluations for multi-index inputs.
Thread-safe: all methods take &self. Multiple threads can call eval
concurrently.
§Type parameters
V - cached value type
F - single-evaluation function, Fn(&[I]) -> V
I - index element type (default usize); use u8 for quantics
§Examples
use tensor4all_tcicore::CachedFunction;
// Cache a 2-site function with local dimensions [3, 4]
let cf = CachedFunction::new(
|idx: &[usize]| (idx[0] * 4 + idx[1]) as f64,
&[3, 4],
).unwrap();
// First call evaluates and caches
let v00 = cf.eval(&[0, 0]);
assert_eq!(v00, 0.0);
assert_eq!(cf.num_evals(), 1);
assert_eq!(cf.num_cache_hits(), 0);
// Second call uses cache
let v00_again = cf.eval(&[0, 0]);
assert_eq!(v00_again, 0.0);
assert_eq!(cf.num_cache_hits(), 1);
let v12 = cf.eval(&[1, 2]);
assert_eq!(v12, 6.0); // 1*4 + 2

Implementations§
Source§impl<V, F, I> CachedFunction<V, F, I>
impl<V, F, I> CachedFunction<V, F, I>
Sourcepub fn new(func: F, local_dims: &[usize]) -> Result<Self, CacheKeyError>
pub fn new(func: F, local_dims: &[usize]) -> Result<Self, CacheKeyError>
Create a new cached function with automatic key selection (up to 1024 bits).
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| idx[0] + idx[1], &[2, 3]).unwrap();
assert_eq!(cf.eval(&[1, 2]), 3);
assert_eq!(cf.num_sites(), 2);
assert_eq!(cf.local_dims(), &[2, 3]);

Source
pub fn with_batch<B>(
func: F,
batch_func: B,
local_dims: &[usize],
) -> Result<Self, CacheKeyError>
pub fn with_batch<B>( func: F, batch_func: B, local_dims: &[usize], ) -> Result<Self, CacheKeyError>
Create with a batch function for efficient multi-point evaluation.
The batch function is used for cache misses during eval_batch
calls, enabling amortized cost when evaluating many indices at once
(e.g., batch FFI calls or vectorized computations).
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::with_batch(
|idx: &[usize]| idx[0] * 10 + idx[1],
|indices: &[Vec<usize>]| indices.iter().map(|idx| idx[0] * 10 + idx[1]).collect(),
&[3, 4],
).unwrap();
let results = cf.eval_batch(&[vec![0, 1], vec![2, 3]]);
assert_eq!(results, vec![1, 23]);
assert_eq!(cf.num_evals(), 2);

Source
pub fn with_key_type<K: CacheKey>(
func: F,
local_dims: &[usize],
) -> Result<Self, CacheKeyError>
pub fn with_key_type<K: CacheKey>( func: F, local_dims: &[usize], ) -> Result<Self, CacheKeyError>
Create with an explicit key type for index spaces larger than 1024 bits.
§Example
use bnum::types::U2048;
use tensor4all_tcicore::{CacheKey, CachedFunction};
#[derive(Clone, Hash, PartialEq, Eq)]
struct U2048Key(U2048);
impl CacheKey for U2048Key {
const BITS_COUNT: u32 = 2048;
const ZERO: Self = Self(U2048::ZERO);
const ONE: Self = Self(U2048::ONE);
fn from_usize(v: usize) -> Self {
Self(U2048::from(v as u64))
}
fn checked_mul(self, rhs: Self) -> Option<Self> {
self.0.checked_mul(rhs.0).map(Self)
}
fn wrapping_add(self, rhs: Self) -> Self {
Self(self.0.wrapping_add(rhs.0))
}
}
let local_dims = vec![2usize; 1025];
let cf = CachedFunction::with_key_type::<U2048Key>(
|idx: &[usize]| idx.iter().sum::<usize>(),
&local_dims,
).unwrap();
let zeros = vec![0usize; 1025];
assert_eq!(cf.eval(&zeros), 0);
assert_eq!(cf.key_type(), "custom");

Source
pub fn with_key_type_and_batch<K: CacheKey, B>(
func: F,
batch_func: B,
local_dims: &[usize],
) -> Result<Self, CacheKeyError>
pub fn with_key_type_and_batch<K: CacheKey, B>( func: F, batch_func: B, local_dims: &[usize], ) -> Result<Self, CacheKeyError>
Create with explicit key type and batch function.
Combines with_key_type and
with_batch for index spaces larger than 1024
bits that also benefit from batch evaluation.
§Examples
use tensor4all_tcicore::CachedFunction;
// Use u128 key type with batch support
let cf = CachedFunction::with_key_type_and_batch::<u128, _>(
|idx: &[usize]| idx.iter().sum::<usize>(),
|indices: &[Vec<usize>]| indices.iter().map(|idx| idx.iter().sum()).collect(),
&[2, 3, 4],
).unwrap();
let results = cf.eval_batch(&[vec![0, 0, 0], vec![1, 2, 3]]);
assert_eq!(results, vec![0, 6]);

Source
pub fn eval(&self, idx: &[I]) -> V
pub fn eval(&self, idx: &[I]) -> V
Evaluate at a given index, using cache if available.
On the first call for a given index, the wrapped function is invoked and the result is cached. Subsequent calls with the same index return the cached value. This method is thread-safe.
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| idx[0] * idx[1], &[5, 5]).unwrap();
assert_eq!(cf.eval(&[3, 4]), 12);
assert_eq!(cf.num_evals(), 1);
// Cache hit
assert_eq!(cf.eval(&[3, 4]), 12);
assert_eq!(cf.num_evals(), 1);
assert_eq!(cf.num_cache_hits(), 1);

Source
pub fn eval_no_cache(&self, idx: &[I]) -> V
pub fn eval_no_cache(&self, idx: &[I]) -> V
Evaluate bypassing the cache.
The result is neither read from nor stored in the cache, and evaluation counters are not updated. Useful for verification or when the caller intentionally wants a fresh evaluation.
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| idx[0] + 1, &[4]).unwrap();
assert_eq!(cf.eval_no_cache(&[2]), 3);
assert_eq!(cf.cache_size(), 0);
assert_eq!(cf.num_evals(), 0);

Source
pub fn eval_batch(&self, indices: &[Vec<I>]) -> Vec<V>
pub fn eval_batch(&self, indices: &[Vec<I>]) -> Vec<V>
Evaluate at multiple indices. Uses batch function for cache misses if available.
Returns results in the same order as the input indices.
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| idx[0] * 2 + idx[1], &[2, 2]).unwrap();
let results = cf.eval_batch(&[vec![0, 0], vec![0, 1], vec![1, 0]]);
assert_eq!(results, vec![0, 1, 2]);

Source
pub fn local_dims(&self) -> &[usize]
pub fn local_dims(&self) -> &[usize]
Get the local dimensions.
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| 0, &[3, 4, 5]).unwrap();
assert_eq!(cf.local_dims(), &[3, 4, 5]);

Source
pub fn num_sites(&self) -> usize
pub fn num_sites(&self) -> usize
Get the number of sites (length of the multi-index).
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| 0, &[2, 3]).unwrap();
assert_eq!(cf.num_sites(), 2);

Source
pub fn num_evals(&self) -> usize
pub fn num_evals(&self) -> usize
Get the number of function evaluations (cache misses).
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
cf.eval(&[0]);
cf.eval(&[1]);
cf.eval(&[0]); // cache hit, not a new eval
assert_eq!(cf.num_evals(), 2);

Source
pub fn num_cache_hits(&self) -> usize
pub fn num_cache_hits(&self) -> usize
Get the number of cache hits.
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
cf.eval(&[0]);
assert_eq!(cf.num_cache_hits(), 0);
cf.eval(&[0]);
assert_eq!(cf.num_cache_hits(), 1);

Source
pub fn total_calls(&self) -> usize
pub fn total_calls(&self) -> usize
Get total calls (evaluations + cache hits).
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
cf.eval(&[0]);
cf.eval(&[1]);
cf.eval(&[0]); // cache hit
assert_eq!(cf.total_calls(), 3);
assert_eq!(cf.total_calls(), cf.num_evals() + cf.num_cache_hits());

Source
pub fn cache_hit_ratio(&self) -> f64
pub fn cache_hit_ratio(&self) -> f64
Get cache hit ratio (0.0 when no calls have been made).
Returns num_cache_hits() / total_calls() as a value in [0.0, 1.0].
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
assert_eq!(cf.cache_hit_ratio(), 0.0); // no calls yet
cf.eval(&[0]);
cf.eval(&[0]); // cache hit
assert!((cf.cache_hit_ratio() - 0.5).abs() < 1e-10);

Source
pub fn clear_cache(&self)
pub fn clear_cache(&self)
Clear the cache.
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
cf.eval(&[2]);
assert_eq!(cf.cache_size(), 1);
cf.clear_cache();
assert_eq!(cf.cache_size(), 0);

Source
pub fn cache_size(&self) -> usize
pub fn cache_size(&self) -> usize
Number of cached entries.
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
assert_eq!(cf.cache_size(), 0);
cf.eval(&[0]);
cf.eval(&[1]);
assert_eq!(cf.cache_size(), 2);
cf.eval(&[0]); // cache hit, no new entry
assert_eq!(cf.cache_size(), 2);

Source
pub fn is_cached(&self, idx: &[I]) -> bool
pub fn is_cached(&self, idx: &[I]) -> bool
Check if an index is cached.
§Examples
use tensor4all_tcicore::CachedFunction;
let cf = CachedFunction::new(|idx: &[usize]| idx[0], &[4]).unwrap();
assert!(!cf.is_cached(&[1]));
cf.eval(&[1]);
assert!(cf.is_cached(&[1]));

Source
pub fn key_type(&self) -> &'static str
pub fn key_type(&self) -> &'static str
Internal key type name (for debugging).
Returns "u64", "u128", "U256", "U512", "U1024" for
automatically selected types, or "custom" when constructed with
with_key_type.
§Examples
use tensor4all_tcicore::CachedFunction;
// Small index space uses u64
let cf = CachedFunction::new(|idx: &[usize]| 0, &[2, 3]).unwrap();
assert_eq!(cf.key_type(), "u64");

Auto Trait Implementations§
impl<V, F, I = usize> !Freeze for CachedFunction<V, F, I>
impl<V, F, I = usize> !RefUnwindSafe for CachedFunction<V, F, I>
impl<V, F, I> Send for CachedFunction<V, F, I>
impl<V, F, I> Sync for CachedFunction<V, F, I>
impl<V, F, I> Unpin for CachedFunction<V, F, I>
impl<V, F, I> UnsafeUnpin for CachedFunction<V, F, I>
where
    F: UnsafeUnpin,
impl<V, F, I = usize> !UnwindSafe for CachedFunction<V, F, I>
Blanket Implementations§
§
impl<U> As for U

impl<U> As for U

§
fn as_<T>(self) -> T
where
    T: CastFrom<U>,

fn as_<T>(self) -> T
where
    T: CastFrom<U>,

Casts self to type T. The semantics of numeric casting with the as operator are followed, so <T as As>::as_::<U> can be used in the same way as T as U for numeric conversions. Read more

Source§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
§
impl<T> DistributionExt for T
where
    T: ?Sized,

impl<T> DistributionExt for T
where
    T: ?Sized,

fn rand<T>(&self, rng: &mut (impl Rng + ?Sized)) -> T
where
    Self: Distribution<T>,

§
impl<T> DistributionExt for T
where
    T: ?Sized,

impl<T> DistributionExt for T
where
    T: ?Sized,

fn rand<T>(&self, rng: &mut (impl Rng + ?Sized)) -> T
where
    Self: Distribution<T>,
Source§
impl<T> IntoEither for T

impl<T> IntoEither for T

Source§
fn into_either(self, into_left: bool) -> Either<Self, Self>

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more

Source§
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more